(This page was rendered as an htmldiff comparison between the old and new versions of the specification.)
This page displays common pseudocode functions shared by many pages.
// AArch32.VCRMatch()
// ==================
// Return TRUE if "vaddress" matches an enabled vector-catch address in DBGVCR.

boolean AArch32.VCRMatch(bits(32) vaddress)
    if UsingAArch32() && ELUsingAArch32(EL1) && PSTATE.EL != EL2 then
        // Each bit position in this string corresponds to a bit in DBGVCR and an exception vector.
        match_word = Zeros(32);
        if vaddress<31:5> == ExcVectorBase()<31:5> then
            if HaveEL(EL3) && !IsSecure() then
                match_word<UInt(vaddress<4:2>) + 24> = '1';    // Non-secure vectors
            else
                match_word<UInt(vaddress<4:2>) + 0> = '1';     // Secure vectors (or no EL3)
        if HaveEL(EL3) && ELUsingAArch32(EL3) && IsSecure() && vaddress<31:5> == MVBAR<31:5> then
            match_word<UInt(vaddress<4:2>) + 8> = '1';         // Monitor vectors
        // Mask out bits not corresponding to vectors.
        if !HaveEL(EL3) then
            mask = '00000000':'00000000':'00000000':'11011110';    // DBGVCR[31:8] are RES0
        elsif !ELUsingAArch32(EL3) then
            mask = '11011110':'00000000':'00000000':'11011110';    // DBGVCR[15:8] are RES0
        else
            mask = '11011110':'00000000':'11011100':'11011110';
        match_word = match_word AND DBGVCR AND mask;
        match = !IsZero(match_word);
        // Check for UNPREDICTABLE case - match on Prefetch Abort and Data Abort vectors
        if !IsZero(match_word<28:27,12:11,4:3>) && DebugTarget() == PSTATE.EL then
            match = ConstrainUnpredictableBool(Unpredictable_VCMATCHDAPA);
        if !IsZero(vaddress<1:0>) && match then
            match = ConstrainUnpredictableBool(Unpredictable_VCMATCHHALF);
    else
        match = FALSE;
    return match;
// AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled()
// ========================================================

boolean AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled()
    // The definition of this function is IMPLEMENTATION DEFINED.
    // In the recommended interface, AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled returns
    // the state of the (DBGEN AND SPIDEN) signal.
    if !HaveEL(EL3) && !IsSecure() then return FALSE;
    return DBGEN == HIGH && SPIDEN == HIGH;
// AArch32.BreakpointMatch()
// =========================
// Breakpoint matching in an AArch32 translation regime.
// Returns (match, mismatch) for breakpoint unit "n" against instruction address
// "vaddress" of length "size" bytes (2 or 4).

(boolean,boolean) AArch32.BreakpointMatch(integer n, bits(32) vaddress, integer size)
    assert ELUsingAArch32(S1TranslationRegime());
    // NOTE(review): this assert was garbled by a merged htmldiff (old/new text interleaved);
    // reconstructed to the newer reading using the GetNum* helper.
    assert n < GetNumBreakpointsUInt();
    enabled = DBGBCR[n].E == '1';
    ispriv = PSTATE.EL != EL0;
    linked = DBGBCR[n].BT == '0x01';
    isbreakpnt = TRUE;
    linked_to = FALSE;
    state_match = AArch32.StateMatch(DBGBCR[n].SSC, DBGBCR[n].HMC, DBGBCR[n].PMC,
                                     linked, DBGBCR[n].LBN, isbreakpnt, ispriv);
    (value_match, value_mismatch) = AArch32.BreakpointValueMatch(n, vaddress, linked_to);
    if size == 4 then                          // Check second halfword
        // If the breakpoint address and BAS of an Address breakpoint match the address of the
        // second halfword of an instruction, but not the address of the first halfword, it is
        // CONSTRAINED UNPREDICTABLE whether or not this breakpoint generates a Breakpoint debug
        // event.
        (match_i, mismatch_i) = AArch32.BreakpointValueMatch(n, vaddress + 2, linked_to);
        if !value_match && match_i then
            value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);
        if value_mismatch && !mismatch_i then
            value_mismatch = ConstrainUnpredictableBool(Unpredictable_BPMISMATCHHALF);
    if vaddress<1> == '1' && DBGBCR[n].BAS == '1111' then
        // The above notwithstanding, if DBGBCR[n].BAS == '1111', then it is CONSTRAINED
        // UNPREDICTABLE whether or not a Breakpoint debug event is generated for an instruction
        // at the address DBGBVR[n]+2.
        if value_match then value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);
        if !value_mismatch then value_mismatch = ConstrainUnpredictableBool(Unpredictable_BPMISMATCHHALF);
    match = value_match && state_match && enabled;
    mismatch = value_mismatch && state_match && enabled;
    return (match, mismatch);
// AArch32.BreakpointValueMatch()
// ==============================
// The first result is whether an Address Match or Context breakpoint is programmed on the
// instruction at "address". The second result is whether an Address Mismatch breakpoint is
// programmed on the instruction, that is, whether the instruction should be stepped.

(boolean,boolean) AArch32.BreakpointValueMatch(integer n, bits(32) vaddress, boolean linked_to)
    // "n" is the identity of the breakpoint unit to match against.
    // "vaddress" is the current instruction address, ignored if linked_to is TRUE and for Context
    //   matching breakpoints.
    // "linked_to" is TRUE if this is a call from StateMatch for linking.

    // If a non-existent breakpoint then it is CONSTRAINED UNPREDICTABLE whether this gives
    // no match or the breakpoint is mapped to another UNKNOWN implemented breakpoint.
    // NOTE(review): this range check was garbled by a merged htmldiff; reconstructed to the
    // newer reading using the GetNum* helpers.
    if n >= GetNumBreakpointsUInt() then
        (c, n) = ConstrainUnpredictableInteger(0, GetNumBreakpointsUInt() - 1, Unpredictable_BPNOTIMPL);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then return (FALSE,FALSE);

    // If this breakpoint is not enabled, it cannot generate a match. (This could also happen on a
    // call from StateMatch for linking).
    if DBGBCR[n].E == '0' then return (FALSE,FALSE);

    // The highest-numbered breakpoints are the context-aware ones.
    context_aware = (n >= GetNumBreakpointsUInt() - GetNumContextAwareBreakpointsUInt());

    // If BT is set to a reserved type, behaves either as disabled or as a not-reserved type.
    dbgtype = DBGBCR[n].BT;
    if ((dbgtype IN {'011x','11xx'} && !HaveVirtHostExt() && !HaveV82Debug()) || // Context matching
         (dbgtype == '010x' && HaltOnBreakpointOrWatchpoint()) ||               // Address mismatch
         (dbgtype != '0x0x' && !context_aware) ||                               // Context matching
         (dbgtype == '1xxx' && !HaveEL(EL2))) then                              // EL2 extension
        (c, dbgtype) = ConstrainUnpredictableBits(Unpredictable_RESBPTYPE);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then return (FALSE,FALSE);
        // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value

    // Determine what to compare against.
    match_addr = (dbgtype == '0x0x');
    mismatch   = (dbgtype == '010x');
    match_vmid = (dbgtype == '10xx');
    match_cid1 = (dbgtype == 'xx1x');
    match_cid2 = (dbgtype == '11xx');
    linked     = (dbgtype == 'xxx1');

    // If this is a call from StateMatch, return FALSE if the breakpoint is not programmed for a
    // VMID and/or context ID match, of if not context-aware. The above assertions mean that the
    // code can just test for match_addr == TRUE to confirm all these things.
    if linked_to && (!linked || match_addr) then return (FALSE,FALSE);

    // If called from BreakpointMatch return FALSE for Linked context ID and/or VMID matches.
    if !linked_to && linked && !match_addr then return (FALSE,FALSE);

    // Do the comparison.
    if match_addr then
        byte = UInt(vaddress<1:0>);
        assert byte IN {0,2};                  // "vaddress" is halfword aligned
        byte_select_match = (DBGBCR[n].BAS<byte> == '1');
        integer top = 31;
        BVR_match = (vaddress<top:2> == DBGBVR[n]<top:2>) && byte_select_match;
    elsif match_cid1 then
        BVR_match = (PSTATE.EL != EL2 && CONTEXTIDR == DBGBVR[n]<31:0>);
    if match_vmid then
        if ELUsingAArch32(EL2) then
            vmid = ZeroExtend(VTTBR.VMID, 16);
            bvr_vmid = ZeroExtend(DBGBXVR[n]<7:0>, 16);
        elsif !Have16bitVMID() || VTCR_EL2.VS == '0' then
            vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16);
            bvr_vmid = ZeroExtend(DBGBXVR[n]<7:0>, 16);
        else
            vmid = VTTBR_EL2.VMID;
            bvr_vmid = DBGBXVR[n]<15:0>;
        BXVR_match = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                      vmid == bvr_vmid);
    elsif match_cid2 then
        BXVR_match = ((HaveVirtHostExt() || HaveV82Debug()) && EL2Enabled() &&
                      !ELUsingAArch32(EL2) &&
                      DBGBXVR[n]<31:0> == CONTEXTIDR_EL2<31:0>);

    bvr_match_valid = (match_addr || match_cid1);
    bxvr_match_valid = (match_vmid || match_cid2);
    match = (!bxvr_match_valid || BXVR_match) && (!bvr_match_valid || BVR_match);
    return (match && !mismatch, !match && mismatch);
// AArch32.StateMatch()
// ====================
// Determine whether a breakpoint or watchpoint is enabled in the current mode and state.

boolean AArch32.StateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean linked, bits(4) LBN,
                           boolean isbreakpnt, boolean ispriv)
    // "SSC", "HMC", "PxC" are the control fields from the DBGBCR[n] or DBGWCR[n] register.
    // "linked" is TRUE if this is a linked breakpoint/watchpoint type.
    // "LBN" is the linked breakpoint number from the DBGBCR[n] or DBGWCR[n] register.
    // "isbreakpnt" is TRUE for breakpoints, FALSE for watchpoints.
    // "ispriv" is valid for watchpoints, and selects between privileged and unprivileged accesses.

    // If parameters are set to a reserved type, behaves as either disabled or a defined type
    (c, SSC, HMC, PxC) = CheckValidStateMatch(SSC, HMC, PxC, isbreakpnt);
    if c == Constraint_DISABLED then return FALSE;
    // Otherwise the HMC,SSC,PxC values are either valid or the values returned by
    // CheckValidStateMatch are valid.

    PL2_match = HaveEL(EL2) && ((HMC == '1' && (SSC:PxC != '1000')) || SSC == '11');
    PL1_match = PxC<0> == '1';
    PL0_match = PxC<1> == '1';
    SSU_match = isbreakpnt && HMC == '0' && PxC == '00' && SSC != '11';

    if !ispriv && !isbreakpnt then
        priv_match = PL0_match;
    elsif SSU_match then
        priv_match = PSTATE.M IN {M32_User,M32_Svc,M32_System};
    else
        case PSTATE.EL of
            when EL3  priv_match = PL1_match;  // EL3 and EL1 are both PL1
            when EL2  priv_match = PL2_match;
            when EL1  priv_match = PL1_match;
            when EL0  priv_match = PL0_match;

    case SSC of
        when '00'  security_state_match = TRUE;                        // Both
        when '01'  security_state_match = !IsSecure();                 // Non-secure only
        when '10'  security_state_match = IsSecure();                  // Secure only
        when '11'  security_state_match = (HMC == '1' || IsSecure());  // HMC=1 -> Both, 0 -> Secure only

    if linked then
        // "LBN" must be an enabled context-aware breakpoint unit. If it is not context-aware then
        // it is CONSTRAINED UNPREDICTABLE whether this gives no match, or LBN is mapped to some
        // UNKNOWN breakpoint that is context-aware.
        lbn = UInt(LBN);
        // NOTE(review): these bounds were garbled by a merged htmldiff; reconstructed to the
        // newer reading using the GetNum* helpers.
        first_ctx_cmp = GetNumBreakpointsUInt() - GetNumContextAwareBreakpointsUInt();
        last_ctx_cmp = GetNumBreakpointsUInt() - 1;
        if (lbn < first_ctx_cmp || lbn > last_ctx_cmp) then
            (c, lbn) = ConstrainUnpredictableInteger(first_ctx_cmp, last_ctx_cmp, Unpredictable_BPNOTCTXCMP);
            assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
            case c of
                when Constraint_DISABLED  return FALSE;      // Disabled
                when Constraint_NONE      linked = FALSE;    // No linking
                // Otherwise ConstrainUnpredictableInteger returned a context-aware breakpoint

    if linked then
        vaddress = bits(32) UNKNOWN;
        linked_to = TRUE;
        (linked_match,-) = AArch32.BreakpointValueMatch(lbn, vaddress, linked_to);

    return priv_match && security_state_match && (!linked || linked_match);
// AArch32.GenerateDebugExceptions()
// =================================
// Return TRUE if debug exceptions are enabled from the current Exception level and
// Security state.

boolean AArch32.GenerateDebugExceptions()
    return AArch32.GenerateDebugExceptionsFrom(PSTATE.EL, IsSecure());
// AArch32.GenerateDebugExceptionsFrom()
// =====================================
// Return TRUE if debug exceptions are enabled from Exception level "from" in the
// given Security state.

boolean AArch32.GenerateDebugExceptionsFrom(bits(2) from, boolean secure)

    // NOTE(review): this entry check was garbled by a merged htmldiff (old and new conditions
    // interleaved); reconstructed to the newer reading, which defers to the AArch64 rules
    // whenever the debug target Exception level is using AArch64 — confirm against the
    // corresponding Arm ARM revision.
    if !ELUsingAArch32(DebugTargetFrom(secure)) then
        mask = '0';                            // No PSTATE.D in AArch32 state
        return AArch64.GenerateDebugExceptionsFrom(from, secure, mask);

    if DBGOSLSR.OSLK == '1' || DoubleLockStatus() || Halted() then
        return FALSE;

    if HaveEL(EL3) && secure then
        assert from != EL2;                    // Secure EL2 always uses AArch64
        if IsSecureEL2Enabled() then
            // Implies that EL3 and EL2 both using AArch64
            enabled = MDCR_EL3.SDD == '0';
        else
            spd = if ELUsingAArch32(EL3) then SDCR.SPD else MDCR_EL3.SPD32;
            if spd<1> == '1' then
                enabled = spd<0> == '1';
            else
                // SPD == 0b01 is reserved, but behaves the same as 0b00.
                enabled = AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled();
        if from == EL0 then enabled = enabled || SDER.SUIDEN == '1';
    else
        enabled = from != EL2;

    return enabled;
// AArch32.CheckForPMUOverflow()
// =============================
// Signal Performance Monitors overflow IRQ and CTI overflow events

boolean AArch32.CheckForPMUOverflow()
    if !ELUsingAArch32(EL1) then return AArch64.CheckForPMUOverflow();
    pmuirq = PMCR.E == '1' && PMINTENSET<31> == '1' && PMOVSSET<31> == '1';
    // NOTE(review): the loop bound was garbled by a merged htmldiff; reconstructed to the
    // newer reading using the GetNum* helper.
    for n = 0 to GetNumEventCountersUInt() - 1
        if HaveEL(EL2) then
            hpmn = if !ELUsingAArch32(EL2) then MDCR_EL2.HPMN else HDCR.HPMN;
            hpme = if !ELUsingAArch32(EL2) then MDCR_EL2.HPME else HDCR.HPME;
            // Counters at or above HPMN are enabled by HPME rather than PMCR.E
            E = (if n < UInt(hpmn) then PMCR.E else hpme);
        else
            E = PMCR.E;
        if E == '1' && PMINTENSET<n> == '1' && PMOVSSET<n> == '1' then pmuirq = TRUE;
    SetInterruptRequestLevel(InterruptID_PMUIRQ, if pmuirq then HIGH else LOW);
    CTI_SetEventLevel(CrossTriggerIn_PMUOverflow, if pmuirq then HIGH else LOW);
    // The request remains set until the condition is cleared. (For example, an interrupt handler
    // or cross-triggered event handler clears the overflow status flag by writing to PMOVSCLR_EL0.)
    return pmuirq;
// AArch32.CountEvents()
// =====================
// Return TRUE if counter "n" should count its event. For the cycle counter, n == 31.

boolean AArch32.CountEvents(integer n)
    // NOTE(review): this assert and the PMOVSR slices below were garbled by a merged htmldiff;
    // reconstructed to the newer reading using the GetNum* helper.
    assert n == 31 || n < GetNumEventCountersUInt();
    if !ELUsingAArch32(EL1) then return AArch64.CountEvents(n);

    // Event counting is disabled in Debug state
    debug = Halted();

    // In Non-secure state, some counters are reserved for EL2
    if HaveEL(EL2) then
        hpmn = if !ELUsingAArch32(EL2) then MDCR_EL2.HPMN else HDCR.HPMN;
        hpme = if !ELUsingAArch32(EL2) then MDCR_EL2.HPME else HDCR.HPME;
        resvd_for_el2 = n >= UInt(hpmn) && n != 31;
    else
        resvd_for_el2 = FALSE;

    // Main enable controls
    if resvd_for_el2 then
        E = if ELUsingAArch32(EL2) then HDCR.HPME else MDCR_EL2.HPME;
    else
        E = PMCR.E;
    enabled = E == '1' && PMCNTENSET<n> == '1';

    // Event counting is allowed unless it is prohibited by any rule below
    prohibited = FALSE;

    // Event counting in Secure state is prohibited if all of:
    // * EL3 is implemented
    // * One of the following is true:
    //   - EL3 is using AArch64, MDCR_EL3.SPME == 0, and either:
    //     - FEAT_PMUv3p7 is not implemented
    //     - MDCR_EL3.MPMX == 0
    //   - EL3 is using AArch32 and SDCR.SPME == 0
    // * Not executing at EL0, or SDER.SUNIDEN == 0
    if HaveEL(EL3) && IsSecure() then
        spme = if ELUsingAArch32(EL3) then SDCR.SPME else MDCR_EL3.SPME;
        if !ELUsingAArch32(EL3) && HavePMUv3p7() then
            prohibited = spme == '0' && MDCR_EL3.MPMX == '0';
        else
            prohibited = spme == '0';
        if prohibited && PSTATE.EL == EL0 then
            prohibited = SDER.SUNIDEN == '0';

    // Event counting at EL2 is prohibited if all of:
    // * The HPMD Extension is implemented
    // * PMNx is not reserved for EL2
    // * HDCR.HPMD == 1
    if !prohibited && PSTATE.EL == EL2 && HaveHPMDExt() && !resvd_for_el2 then
        prohibited = HDCR.HPMD == '1';

    // The IMPLEMENTATION DEFINED authentication interface might override software
    if prohibited && !HaveNoSecurePMUDisableOverride() then
        prohibited = !ExternalSecureNoninvasiveDebugEnabled();

    // PMCR.DP disables the cycle counter when event counting is prohibited
    if enabled && prohibited && n == 31 then
        enabled = PMCR.DP == '0';

    // If FEAT_PMUv3p5 is implemented, cycle counting can be prohibited.
    // This is not overridden by PMCR.DP.
    if Havev85PMU() && n == 31 then
        if HaveEL(EL3) && IsSecure() then
            sccd = if ELUsingAArch32(EL3) then SDCR.SCCD else MDCR_EL3.SCCD;
            if sccd == '1' then prohibited = TRUE;
        if PSTATE.EL == EL2 && HDCR.HCCD == '1' then
            prohibited = TRUE;

    // Event counting might be frozen
    frozen = FALSE;

    // If FEAT_PMUv3p7 is implemented, event counting can be frozen
    if HavePMUv3p7() && n != 31 then
        ovflw = PMOVSR<GetNumEventCountersUInt()-1:0>;
        if resvd_for_el2 then
            FZ = if ELUsingAArch32(EL2) then HDCR.HPMFZO else MDCR_EL2.HPMFZO;
            ovflw<UInt(hpmn)-1:0> = Zeros();
        else
            FZ = PMCR.FZO;
            if HaveEL(EL2) then
                ovflw<GetNumEventCountersUInt()-1:UInt(hpmn)> = Zeros();
        frozen = FZ == '1' && !IsZero(ovflw);

    // Event counting can be filtered by the {P, U, NSK, NSU, NSH} bits
    filter = if n == 31 then PMCCFILTR else PMEVTYPER[n];
    P = filter<31>;
    U = filter<30>;
    NSK = if HaveEL(EL3) then filter<29> else '0';
    NSU = if HaveEL(EL3) then filter<28> else '0';
    NSH = if HaveEL(EL2) then filter<27> else '0';

    case PSTATE.EL of
        when EL0  filtered = if IsSecure() then U == '1' else U != NSU;
        when EL1  filtered = if IsSecure() then P == '1' else P != NSK;
        when EL2  filtered = NSH == '0';
        when EL3  filtered = P == '1';

    return !debug && enabled && !prohibited && !filtered && !frozen;
// AArch32.EnterHypModeInDebugState()
// ==================================
// Take an exception in Debug state to Hyp mode.

AArch32.EnterHypModeInDebugState(ExceptionRecord exception)
    SynchronizeContext();
    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2);
    AArch32.ReportHypEntry(exception);
    AArch32.WriteMode(M32_Hyp);
    SPSR[] = bits(32) UNKNOWN;
    ELR_hyp = bits(32) UNKNOWN;
    // In Debug state, the PE always execute T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1';                            // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    PSTATE.E = HSCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields();
    EndOfInstruction();
// AArch32.EnterModeInDebugState()
// ===============================
// Take an exception in Debug state to a mode other than Monitor and Hyp mode.

AArch32.EnterModeInDebugState(bits(5) target_mode)
    SynchronizeContext();
    assert ELUsingAArch32(EL1) && PSTATE.EL != EL2;
    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(target_mode);
    SPSR[] = bits(32) UNKNOWN;
    R[14] = bits(32) UNKNOWN;
    // In Debug state, the PE always execute T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1';                            // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() && SCTLR.SPAN == '0' then PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields();                       // Update EDSCR processor state flags.
    EndOfInstruction();
// AArch32.EnterMonitorModeInDebugState()
// ======================================
// Take an exception in Debug state to Monitor mode.

AArch32.EnterMonitorModeInDebugState()
    SynchronizeContext();
    assert HaveEL(EL3) && ELUsingAArch32(EL3);
    from_secure = IsSecure();
    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(M32_Monitor);
    SPSR[] = bits(32) UNKNOWN;
    R[14] = bits(32) UNKNOWN;
    // In Debug state, the PE always execute T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1';                            // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() then
        if !from_secure then
            PSTATE.PAN = '0';
        elsif SCTLR.SPAN == '0' then
            PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields();                       // Update EDSCR processor state flags.
    EndOfInstruction();
// AArch32.WatchpointByteMatch()
// =============================
// Return TRUE if the single byte at "vaddress" matches watchpoint unit "n".

boolean AArch32.WatchpointByteMatch(integer n, bits(32) vaddress)
    integer top = 31;
    bottom = if DBGWVR[n]<2> == '1' then 2 else 3;             // Word or doubleword
    byte_select_match = (DBGWCR[n].BAS<UInt(vaddress<bottom-1:0>)> != '0');
    mask = UInt(DBGWCR[n].MASK);

    // If DBGWCR[n].MASK is non-zero value and DBGWCR[n].BAS is not set to '11111111', or
    // DBGWCR[n].BAS specifies a non-contiguous set of bytes behavior is CONSTRAINED
    // UNPREDICTABLE.
    if mask > 0 && !IsOnes(DBGWCR[n].BAS) then
        byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPMASKANDBAS);
    else
        LSB = (DBGWCR[n].BAS AND NOT(DBGWCR[n].BAS - 1));
        MSB = (DBGWCR[n].BAS + LSB);
        if !IsZero(MSB AND (MSB - 1)) then                     // Not contiguous
            byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPBASCONTIGUOUS);
            bottom = 3;                                        // For the whole doubleword

    // If the address mask is set to a reserved value, the behavior is CONSTRAINED UNPREDICTABLE.
    if mask > 0 && mask <= 2 then
        (c, mask) = ConstrainUnpredictableInteger(3, 31, Unpredictable_RESWPMASK);
        assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
        case c of
            when Constraint_DISABLED  return FALSE;            // Disabled
            when Constraint_NONE      mask = 0;                // No masking
            // Otherwise the value returned by ConstrainUnpredictableInteger is a not-reserved value

    if mask > bottom then
        // If the DBGxVR<n>_EL1.RESS field bits are not a sign extension of the MSB
        // of DBGBVR<n>_EL1.VA, it is UNPREDICTABLE whether they appear to be
        // included in the match.
        if !IsOnes(DBGBVR_EL1[n]<63:top>) && !IsZero(DBGBVR_EL1[n]<63:top>) then
            // NOTE(review): the enumerator name was garbled by a merged htmldiff pairing the
            // corrected spelling with the old "Unpredicatable_" typo; the corrected name is kept.
            if ConstrainUnpredictableBool(Unpredictable_DBGxVR_RESS) then
                top = 63;
        WVR_match = (vaddress<top:mask> == DBGWVR[n]<top:mask>);
        // If masked bits of DBGWVR_EL1[n] are not zero, the behavior is CONSTRAINED UNPREDICTABLE.
        if WVR_match && !IsZero(DBGWVR[n]<mask-1:bottom>) then
            WVR_match = ConstrainUnpredictableBool(Unpredictable_WPMASKEDBITS);
    else
        WVR_match = vaddress<top:bottom> == DBGWVR[n]<top:bottom>;

    return WVR_match && byte_select_match;
// AArch32.WatchpointMatch()
// =========================
// Watchpoint matching in an AArch32 translation regime.

boolean AArch32.WatchpointMatch(integer n, bits(32) vaddress, integer size, boolean ispriv,
                                AccType acctype, boolean iswrite)
    assert ELUsingAArch32(S1TranslationRegime());
    // NOTE(review): this assert was garbled by a merged htmldiff; reconstructed to the
    // newer reading using the GetNum* helper.
    assert n < GetNumWatchpointsUInt();

    // "ispriv" is:
    // * FALSE for all loads, stores, and atomic operations executed at EL0.
    // * FALSE if the access is unprivileged.
    // * TRUE for all other loads, stores, and atomic operations.
    enabled = DBGWCR[n].E == '1';
    linked = DBGWCR[n].WT == '1';
    isbreakpnt = FALSE;
    state_match = AArch32.StateMatch(DBGWCR[n].SSC, DBGWCR[n].HMC, DBGWCR[n].PAC,
                                     linked, DBGWCR[n].LBN, isbreakpnt, ispriv);
    ls_match = FALSE;
    ls_match = (DBGWCR[n].LSC<(if iswrite then 1 else 0)> == '1');
    value_match = FALSE;
    for byte = 0 to size - 1
        value_match = value_match || AArch32.WatchpointByteMatch(n, vaddress + byte);
    return value_match && state_match && ls_match && enabled;
// AArch32.Abort()
// ===============
// Abort and Debug exception handling in an AArch32 translation regime.

AArch32.Abort(bits(32) vaddress, FaultRecord fault)
    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = (HCR_EL2.TGE == '1' || IsSecondStage(fault) ||
                            (HaveRASExt() && HCR_EL2.TEA == '1' && IsExternalAbort(fault)) ||
                            (IsDebugException(fault) && MDCR_EL2.TDE == '1'));
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.EA == '1' && IsExternalAbort(fault);
    if route_to_aarch64 then
        AArch64.Abort(ZeroExtend(vaddress), fault);
    elsif fault.acctype == AccType_IFETCH then
        AArch32.TakePrefetchAbortException(vaddress, fault);
    else
        AArch32.TakeDataAbortException(vaddress, fault);
// AArch32.AbortSyndrome()
// =======================
// Creates an exception syndrome record for Abort exceptions taken to Hyp mode
// from an AArch32 translation regime.

ExceptionRecord AArch32.AbortSyndrome(Exception exceptype, FaultRecord fault, bits(32) vaddress)
    exception = ExceptionSyndrome(exceptype);
    d_side = exceptype == Exception_DataAbort;
    exception.syndrome = AArch32.FaultSyndrome(d_side, fault);
    exception.vaddress = ZeroExtend(vaddress);
    if IPAValid(fault) then
        exception.ipavalid = TRUE;
        exception.NS = fault.ipaddress.NS;
        exception.ipaddress = ZeroExtend(fault.ipaddress.address);
    else
        exception.ipavalid = FALSE;
    return exception;
// AArch32.CheckPCAlignment()
// ==========================

AArch32.CheckPCAlignment()
    bits(32) pc = ThisInstrAddr();
    if (CurrentInstrSet() == InstrSet_A32 && pc<1> == '1') || pc<0> == '1' then
        if AArch32.GeneralExceptionsToAArch64() then AArch64.PCAlignmentFault();
        // Generate an Alignment fault Prefetch Abort exception
        vaddress = pc;
        acctype = AccType_IFETCH;
        iswrite = FALSE;
        secondstage = FALSE;
        AArch32.Abort(vaddress, AArch32.AlignmentFault(acctype, iswrite, secondstage));
// AArch32.ReportDataAbort()
// =========================
// Report syndrome information for aborts taken to modes other than Hyp mode.

AArch32.ReportDataAbort(boolean route_to_monitor, FaultRecord fault, bits(32) vaddress)
    // The encoding used in the IFSR or DFSR can be Long-descriptor format or Short-descriptor
    // format. Normally, the current translation table format determines the format. For an abort
    // from Non-secure state to Monitor mode, the IFSR or DFSR uses the Long-descriptor format if
    // any of the following applies:
    // * The Secure TTBCR.EAE is set to 1.
    // * The abort is synchronous and either:
    //   - It is taken from Hyp mode.
    //   - It is taken from EL1 or EL0, and the Non-secure TTBCR.EAE is set to 1.
    long_format = FALSE;
    if route_to_monitor && !IsSecure() then
        long_format = TTBCR_S.EAE == '1';
        if !IsSErrorInterrupt(fault) && !long_format then
            long_format = PSTATE.EL == EL2 || TTBCR.EAE == '1';
    else
        long_format = TTBCR.EAE == '1';
    d_side = TRUE;
    if long_format then
        syndrome = AArch32.FaultStatusLD(d_side, fault);
    else
        syndrome = AArch32.FaultStatusSD(d_side, fault);
    if fault.acctype == AccType_IC then
        if (!long_format &&
            boolean IMPLEMENTATION_DEFINED "Report I-cache maintenance fault in IFSR") then
            i_syndrome = syndrome;
            syndrome<10,3:0> = EncodeSDFSC(Fault_ICacheMaint, 1);
        else
            i_syndrome = bits(32) UNKNOWN;
        if route_to_monitor then
            IFSR_S = i_syndrome;
        else
            IFSR = i_syndrome;
    if route_to_monitor then
        DFSR_S = syndrome;
        DFAR_S = vaddress;
    else
        DFSR = syndrome;
        DFAR = vaddress;
    return;
// AArch32.ReportPrefetchAbort()
// =============================
// Report syndrome information for aborts taken to modes other than Hyp mode.

AArch32.ReportPrefetchAbort(boolean route_to_monitor, FaultRecord fault, bits(32) vaddress)
    // The encoding used in the IFSR can be Long-descriptor format or Short-descriptor format.
    // Normally, the current translation table format determines the format. For an abort from
    // Non-secure state to Monitor mode, the IFSR uses the Long-descriptor format if any of the
    // following applies:
    // * The Secure TTBCR.EAE is set to 1.
    // * It is taken from Hyp mode.
    // * It is taken from EL1 or EL0, and the Non-secure TTBCR.EAE is set to 1.
    long_format = FALSE;
    if route_to_monitor && !IsSecure() then
        long_format = TTBCR_S.EAE == '1' || PSTATE.EL == EL2 || TTBCR.EAE == '1';
    else
        long_format = TTBCR.EAE == '1';
    d_side = FALSE;
    if long_format then
        fsr = AArch32.FaultStatusLD(d_side, fault);
    else
        fsr = AArch32.FaultStatusSD(d_side, fault);
    if route_to_monitor then
        IFSR_S = fsr;
        IFAR_S = vaddress;
    else
        IFSR = fsr;
        IFAR = vaddress;
    return;
// AArch32.TakeDataAbortException()
// ================================

AArch32.TakeDataAbortException(bits(32) vaddress, FaultRecord fault)
    route_to_monitor = HaveEL(EL3) && SCR.EA == '1' && IsExternalAbort(fault);
    route_to_hyp = (HaveEL(EL2) && !IsSecure() && PSTATE.EL IN {EL0, EL1} &&
                    (HCR.TGE == '1' || IsSecondStage(fault) ||
                     (HaveRASExt() && HCR2.TEA == '1' && IsExternalAbort(fault)) ||
                     (IsDebugException(fault) && HDCR.TDE == '1')));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x10;
    lr_offset = 8;
    if IsDebugException(fault) then DBGDSCRext.MOE = fault.debugmoe;
    if route_to_monitor then
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = AArch32.AbortSyndrome(Exception_DataAbort, fault, vaddress);
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakePrefetchAbortException()
// ====================================

AArch32.TakePrefetchAbortException(bits(32) vaddress, FaultRecord fault)
    route_to_monitor = HaveEL(EL3) && SCR.EA == '1' && IsExternalAbort(fault);
    route_to_hyp = (HaveEL(EL2) && !IsSecure() && PSTATE.EL IN {EL0, EL1} &&
                    (HCR.TGE == '1' || IsSecondStage(fault) ||
                     (HaveRASExt() && HCR2.TEA == '1' && IsExternalAbort(fault)) ||
                     (IsDebugException(fault) && HDCR.TDE == '1')));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0C;
    lr_offset = 4;
    if IsDebugException(fault) then DBGDSCRext.MOE = fault.debugmoe;
    if route_to_monitor then
        AArch32.ReportPrefetchAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        if fault.statuscode == Fault_Alignment then             // PC Alignment fault
            exception = ExceptionSyndrome(Exception_PCAlignment);
            exception.vaddress = ThisInstrAddr();
        else
            exception = AArch32.AbortSyndrome(Exception_InstructionAbort, fault, vaddress);
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.ReportPrefetchAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakePhysicalFIQException()
// ==================================

AArch32.TakePhysicalFIQException()
    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = HCR_EL2.TGE == '1' || (HCR_EL2.FMO == '1' && !IsInHost());
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.FIQ == '1';
    if route_to_aarch64 then AArch64.TakePhysicalFIQException();
    route_to_monitor = HaveEL(EL3) && SCR.FIQ == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.FMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x1C;
    lr_offset = 4;
    if route_to_monitor then
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = ExceptionSyndrome(Exception_FIQ);
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    else
        AArch32.EnterMode(M32_FIQ, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakePhysicalIRQException()
// ==================================
// Take an enabled physical IRQ exception.

AArch32.TakePhysicalIRQException()
    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = HCR_EL2.TGE == '1' || (HCR_EL2.IMO == '1' && !IsInHost());
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.IRQ == '1';
    if route_to_aarch64 then AArch64.TakePhysicalIRQException();
    route_to_monitor = HaveEL(EL3) && SCR.IRQ == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.IMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x18;
    lr_offset = 4;
    if route_to_monitor then
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = ExceptionSyndrome(Exception_IRQ);
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    else
        AArch32.EnterMode(M32_IRQ, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakePhysicalSErrorException()
// =====================================
// NOTE(review): this function was garbled by a merged htmldiff interleaving two revisions
// (old: "errortype"/"impdef_syndrome" parameters with if/elsif routing; new: "pe_error_state"
// parameter with an explicit target_el and case dispatch). Reconstructed to the newer
// revision — confirm against the corresponding Arm ARM release.

AArch32.TakePhysicalSErrorException(boolean parity, bit extflag, bits(2) pe_error_state,
                                    bits(25) full_syndrome)

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = (HCR_EL2.TGE == '1' || (!IsInHost() && HCR_EL2.AMO == '1'));
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.EA == '1';
    if route_to_aarch64 then
        AArch64.TakePhysicalSErrorException(full_syndrome);

    route_to_monitor = HaveEL(EL3) && SCR.EA == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.AMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x10;
    lr_offset = 8;

    // Determine the Exception level the SError is taken to.
    bits(2) target_el;
    if route_to_monitor then
        target_el = EL3;
    elsif PSTATE.EL == EL2 || route_to_hyp then
        target_el = EL2;
    else
        target_el = EL1;

    if IsSErrorEdgeTriggered(target_el, full_syndrome) then
        ClearPendingPhysicalSError();

    fault = AArch32.AsynchExternalAbort(parity, pe_error_state, extflag);
    vaddress = bits(32) UNKNOWN;

    case target_el of
        when EL3
            AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
            AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
        when EL2
            exception = AArch32.AbortSyndrome(Exception_DataAbort, fault, vaddress);
            if PSTATE.EL == EL2 then
                AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
            else
                AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
        when EL1
            AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
            AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
        otherwise
            Unreachable();
// AArch32.TakeVirtualFIQException()
// =================================
// Take a virtual FIQ exception; only valid when TGE==0 and FMO==1.
AArch32.TakeVirtualFIQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    if ELUsingAArch32(EL2) then    // Virtual IRQ enabled if TGE==0 and FMO==1
        assert HCR.TGE == '0' && HCR.FMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.FMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then AArch64.TakeVirtualFIQException();

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x1C;
    lr_offset = 4;
    AArch32.EnterMode(M32_FIQ, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakeVirtualIRQException()
// =================================
// Take a virtual IRQ exception; only valid when TGE==0 and IMO==1.
AArch32.TakeVirtualIRQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    if ELUsingAArch32(EL2) then    // Virtual IRQs enabled if TGE==0 and IMO==1
        assert HCR.TGE == '0' && HCR.IMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.IMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then AArch64.TakeVirtualIRQException();

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x18;
    lr_offset = 4;
    AArch32.EnterMode(M32_IRQ, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakeVirtualSErrorException()
// ====================================
// Take a virtual SError interrupt; only valid when TGE==0 and AMO==1.
// The reported syndrome comes from VDFSR/VSESR_EL2 when RAS is implemented.
AArch32.TakeVirtualSErrorException(bit extflag, bits(2) pe_error_state, bits(25) full_syndrome)

    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    if ELUsingAArch32(EL2) then    // Virtual SError enabled if TGE==0 and AMO==1
        assert HCR.TGE == '0' && HCR.AMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.AMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then
        AArch64.TakeVirtualSErrorException(full_syndrome);

    route_to_monitor = FALSE;

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x10;
    lr_offset = 8;

    vaddress = bits(32) UNKNOWN;
    parity = FALSE;
    if HaveRASExt() then
        // With RAS, the virtual syndrome is taken from the virtual SError
        // syndrome register of the EL2 register width in use.
        if ELUsingAArch32(EL2) then
            fault = AArch32.AsynchExternalAbort(FALSE, VDFSR.AET, VDFSR.ExT);
        else
            fault = AArch32.AsynchExternalAbort(FALSE, VSESR_EL2.AET, VSESR_EL2.ExT);
    else
        fault = AArch32.AsynchExternalAbort(parity, pe_error_state, extflag);
    ClearPendingVirtualSError();

    AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
    AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
// AArch32.SoftwareBreakpoint()
// ============================
// Handle a BKPT instruction, routing to AArch64 when debug exceptions are
// routed there, otherwise taking a Prefetch Abort.
AArch32.SoftwareBreakpoint(bits(16) immediate)

    if (EL2Enabled() && !ELUsingAArch32(EL2) &&
        (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1')) || !ELUsingAArch32(EL1) then
        AArch64.SoftwareBreakpoint(immediate);

    vaddress = bits(32) UNKNOWN;
    acctype = AccType_IFETCH;            // Take as a Prefetch Abort
    iswrite = FALSE;
    entry = DebugException_BKPT;
    fault = AArch32.DebugFault(acctype, iswrite, entry);
    AArch32.Abort(vaddress, fault);
// Debug exception type values reported in the DBGDSCR.MOE / FSR fields.
constant bits(4) DebugException_Breakpoint  = '0001';
constant bits(4) DebugException_BKPT        = '0011';
constant bits(4) DebugException_VectorCatch = '0101';
constant bits(4) DebugException_Watchpoint  = '1010';
// AArch32.CheckAdvSIMDOrFPRegisterTraps()
// =======================================
// Check if an instruction that accesses an Advanced SIMD and
// floating-point System register is trapped by an appropriate HCR.TIDx
// ID group trap control.
AArch32.CheckAdvSIMDOrFPRegisterTraps(bits(4) reg)

    if PSTATE.EL == EL1 && EL2Enabled() then
        tid0 = if ELUsingAArch32(EL2) then HCR.TID0 else HCR_EL2.TID0;
        tid3 = if ELUsingAArch32(EL2) then HCR.TID3 else HCR_EL2.TID3;

        if (tid0 == '1' && reg == '0000')                            // FPSID
          || (tid3 == '1' && reg IN {'0101', '0110', '0111'}) then   // MVFRx
            if ELUsingAArch32(EL2) then
                AArch32.SystemAccessTrap(M32_Hyp, 0x8);      // Exception_AdvSIMDFPAccessTrap
            else
                AArch64.AArch32SystemAccessTrap(EL2, 0x8);   // Exception_AdvSIMDFPAccessTrap
// AArch32.ExceptionClass()
// ========================
// Returns the Exception Class and Instruction Length fields to be reported in HSR
(integer,bit) AArch32.ExceptionClass(Exception exceptype)

    il_is_valid = TRUE;
    case exceptype of
        when Exception_Uncategorized        ec = 0x00; il_is_valid = FALSE;
        when Exception_WFxTrap              ec = 0x01;
        when Exception_CP15RTTrap           ec = 0x03;
        when Exception_CP15RRTTrap          ec = 0x04;
        when Exception_CP14RTTrap           ec = 0x05;
        when Exception_CP14DTTrap           ec = 0x06;
        when Exception_AdvSIMDFPAccessTrap  ec = 0x07;
        when Exception_FPIDTrap             ec = 0x08;
        when Exception_PACTrap              ec = 0x09;
        when Exception_LDST64BTrap          ec = 0x0A;
        when Exception_CP14RRTTrap          ec = 0x0C;
        when Exception_BranchTarget         ec = 0x0D;
        when Exception_IllegalState         ec = 0x0E; il_is_valid = FALSE;
        when Exception_SupervisorCall       ec = 0x11;
        when Exception_HypervisorCall       ec = 0x12;
        when Exception_MonitorCall          ec = 0x13;
        when Exception_InstructionAbort     ec = 0x20; il_is_valid = FALSE;
        when Exception_PCAlignment          ec = 0x22; il_is_valid = FALSE;
        when Exception_DataAbort            ec = 0x24;
        when Exception_NV2DataAbort         ec = 0x25;
        when Exception_FPTrappedException   ec = 0x28;
        otherwise                           Unreachable();

    // Instruction and Data Aborts taken from Hyp mode use the "same EL" EC value.
    if ec IN {0x20,0x24} && PSTATE.EL == EL2 then
        ec = ec + 1;

    if il_is_valid then
        il = if ThisInstrLength() == 32 then '1' else '0';
    else
        il = '1';

    return (ec,il);
// AArch32.GeneralExceptionsToAArch64()
// ====================================
// Returns TRUE if exceptions normally routed to EL1 are being handled at an Exception
// level using AArch64, because either EL1 is using AArch64 or TGE is in force and EL2
// is using AArch64.
boolean AArch32.GeneralExceptionsToAArch64()
    return ((PSTATE.EL == EL0 && !ELUsingAArch32(EL1)) ||
            (EL2Enabled() && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1'));
// AArch32.ReportHypEntry()
// ========================
// Report syndrome information to Hyp mode registers.
AArch32.ReportHypEntry(ExceptionRecord exception)

    Exception exceptype = exception.exceptype;

    (ec,il) = AArch32.ExceptionClass(exceptype);
    iss = exception.syndrome;

    // IL is not valid for Data Abort exceptions without valid instruction syndrome information
    if ec IN {0x24,0x25} && iss<24> == '0' then il = '1';

    HSR = ec<5:0>:il:iss;

    if exceptype IN {Exception_InstructionAbort, Exception_PCAlignment} then
        HIFAR = exception.vaddress<31:0>;
        HDFAR = bits(32) UNKNOWN;
    elsif exceptype == Exception_DataAbort then
        HIFAR = bits(32) UNKNOWN;
        HDFAR = exception.vaddress<31:0>;

    if exception.ipavalid then
        HPFAR<31:4> = exception.ipaddress<39:12>;
    else
        HPFAR<31:4> = bits(28) UNKNOWN;

    return;
// Resets System registers and memory-mapped control registers that have architecturally-defined
// reset values to those values.
// IMPLEMENTATION DEFINED behavior: declared here, defined by the implementation.
AArch32.ResetControlRegisters(boolean cold_reset);
// AArch32.TakeReset()
// ===================
// Reset into AArch32 state
AArch32.TakeReset(boolean cold_reset)
    assert HighestELUsingAArch32();

    // Enter the highest implemented Exception level in AArch32 state
    if HaveEL(EL3) then
        AArch32.WriteMode(M32_Svc);
        SCR.NS = '0';                  // Secure state
    elsif HaveEL(EL2) then
        AArch32.WriteMode(M32_Hyp);
    else
        AArch32.WriteMode(M32_Svc);

    // Reset the CP14 and CP15 registers and other system components
    AArch32.ResetControlRegisters(cold_reset);
    FPEXC.EN = '0';

    // Reset all other PSTATE fields, including instruction set and endianness according to the
    // SCTLR values produced by the above call to ResetControlRegisters()
    PSTATE.<A,I,F> = '111';    // All asynchronous exceptions masked
    PSTATE.IT = '00000000';    // IT block state reset
    PSTATE.T = SCTLR.TE;       // Instruction set: TE=0: A32, TE=1: T32. PSTATE.J is RES0.
    PSTATE.E = SCTLR.EE;       // Endianness: EE=0: little-endian, EE=1: big-endian
    PSTATE.IL = '0';           // Clear Illegal Execution state bit

    // All registers, bits and fields not reset by the above pseudocode or by the BranchTo() call
    // below are UNKNOWN bitstrings after reset. In particular, the return information registers
    // R14 or ELR_hyp and SPSR have UNKNOWN values, so that it
    // is impossible to return from a reset in an architecturally defined way.
    AArch32.ResetGeneralRegisters();
    AArch32.ResetSIMDFPRegisters();
    AArch32.ResetSpecialRegisters();
    ResetExternalDebugRegisters(cold_reset);

    bits(32) rv;               // IMPLEMENTATION DEFINED reset vector
    if HaveEL(EL3) then
        if MVBAR<0> == '1' then          // Reset vector in MVBAR
            rv = MVBAR<31:1>:'0';
        else
            rv = bits(32) IMPLEMENTATION_DEFINED "reset vector address";
    else
        rv = RVBAR<31:1>:'0';

    // The reset vector must be correctly aligned
    assert rv<0> == '0' && (PSTATE.T == '1' || rv<1> == '0');

    BranchTo(rv, BranchType_RESET);
// ExcVectorBase()
// ===============
// Returns the AArch32 exception vector base address: the Hivecs region when
// SCTLR.V is set, otherwise the address programmed into VBAR.
bits(32) ExcVectorBase()
    if SCTLR.V == '1' then    // Hivecs selected, base = 0xFFFF0000
        return Ones(16):Zeros(16);
    else
        return VBAR<31:5>:Zeros(5);
// AArch32.FPTrappedException()
// ============================
// Record trapped floating-point exception flags in FPEXC and take an
// Undefined Instruction exception.
AArch32.FPTrappedException(bits(8) accumulated_exceptions)
    if AArch32.GeneralExceptionsToAArch64() then
        is_ase = FALSE;
        element = 0;
        AArch64.FPTrappedException(is_ase, accumulated_exceptions);
    FPEXC.DEX = '1';
    FPEXC.TFV = '1';
    FPEXC<7,4:0> = accumulated_exceptions<7,4:0>;   // IDF,IXF,UFF,OFF,DZF,IOF
    FPEXC<10:8> = '111';                            // VECITR is RES1

    AArch32.TakeUndefInstrException();
// AArch32.CallHypervisor()
// ========================
// Performs a HVC call
AArch32.CallHypervisor(bits(16) immediate)
    assert HaveEL(EL2);

    if !ELUsingAArch32(EL2) then
        AArch64.CallHypervisor(immediate);
    else
        AArch32.TakeHVCException(immediate);
// AArch32.CallSupervisor()
// ========================
// Calls the Supervisor
AArch32.CallSupervisor(bits(16) immediate)

    // A conditional SVC that fails its condition reports an UNKNOWN immediate.
    if AArch32.CurrentCond() != '1110' then
        immediate = bits(16) UNKNOWN;
    if AArch32.GeneralExceptionsToAArch64() then
        AArch64.CallSupervisor(immediate);
    else
        AArch32.TakeSVCException(immediate);
// AArch32.TakeHVCException()
// ==========================
AArch32.TakeHVCException(bits(16) immediate)
    assert HaveEL(EL2) && ELUsingAArch32(EL2);

    AArch32.ITAdvance();
    SSAdvance();
    bits(32) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x08;

    exception = ExceptionSyndrome(Exception_HypervisorCall);
    exception.syndrome<15:0> = immediate;

    if PSTATE.EL == EL2 then
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    else
        AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
// AArch32.TakeSMCException()
// ==========================
AArch32.TakeSMCException()
    assert HaveEL(EL3) && ELUsingAArch32(EL3);
    AArch32.ITAdvance();
    SSAdvance();
    bits(32) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x08;
    lr_offset = 0;
    AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakeSVCException()
// ==========================
AArch32.TakeSVCException(bits(16) immediate)

    AArch32.ITAdvance();
    SSAdvance();
    route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1';

    bits(32) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x08;
    lr_offset = 0;

    if PSTATE.EL == EL2 || route_to_hyp then
        exception = ExceptionSyndrome(Exception_SupervisorCall);
        exception.syndrome<15:0> = immediate;
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.EnterMode(M32_Svc, preferred_exception_return, lr_offset, vect_offset);
// AArch32.EnterHypMode()
// ======================
// Take an exception to Hyp mode.
AArch32.EnterHypMode(ExceptionRecord exception, bits(32) preferred_exception_return,
                     integer vect_offset)
    SynchronizeContext();
    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2);

    // Capture the pre-exception PSTATE before any mode change.
    bits(32) spsr = GetPSRFromPSTATE(AArch32_NonDebugState);
    // IRQ and FIQ exceptions do not report syndrome information.
    if !(exception.exceptype IN {Exception_IRQ, Exception_FIQ}) then
        AArch32.ReportHypEntry(exception);
    AArch32.WriteMode(M32_Hyp);
    SPSR[] = spsr;
    ELR_hyp = preferred_exception_return;
    PSTATE.T = HSCTLR.TE;                       // PSTATE.J is RES0
    PSTATE.SS = '0';
    ShouldAdvanceSS = FALSE;
    // A/I/F are masked unless the corresponding exception type is routed to EL3.
    if !HaveEL(EL3) || SCR_GEN[].EA == '0' then PSTATE.A = '1';
    if !HaveEL(EL3) || SCR_GEN[].IRQ == '0' then PSTATE.I = '1';
    if !HaveEL(EL3) || SCR_GEN[].FIQ == '0' then PSTATE.F = '1';
    PSTATE.E = HSCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HaveSSBSExt() then PSTATE.SSBS = HSCTLR.DSSBS;
    BranchTo(HVBAR<31:5>:vect_offset<4:0>, BranchType_EXCEPTION);
    EndOfInstruction();
// AArch32.EnterMode()
// ===================
// Take an exception to a mode other than Monitor and Hyp mode.
AArch32.EnterMode(bits(5) target_mode, bits(32) preferred_exception_return, integer lr_offset,
                  integer vect_offset)
    SynchronizeContext();
    assert ELUsingAArch32(EL1) && PSTATE.EL != EL2;

    // Capture the pre-exception PSTATE before any mode change.
    bits(32) spsr = GetPSRFromPSTATE(AArch32_NonDebugState);
    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(target_mode);
    SPSR[] = spsr;
    R[14] = preferred_exception_return + lr_offset;
    PSTATE.T = SCTLR.TE;                        // PSTATE.J is RES0
    PSTATE.SS = '0';
    ShouldAdvanceSS = FALSE;
    // Mask asynchronous exceptions according to the target mode.
    if target_mode == M32_FIQ then
        PSTATE.<A,I,F> = '111';
    elsif target_mode IN {M32_Abort, M32_IRQ} then
        PSTATE.<A,I> = '11';
    else
        PSTATE.I = '1';
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() && SCTLR.SPAN == '0' then PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = SCTLR.DSSBS;
    BranchTo(ExcVectorBase()<31:5>:vect_offset<4:0>, BranchType_EXCEPTION);
    EndOfInstruction();
// AArch32.EnterMonitorMode()
// ==========================
// Take an exception to Monitor mode.
AArch32.EnterMonitorMode(bits(32) preferred_exception_return, integer lr_offset,
                         integer vect_offset)
    SynchronizeContext();
    assert HaveEL(EL3) && ELUsingAArch32(EL3);
    from_secure = IsSecure();
    // Capture the pre-exception PSTATE before any mode change.
    bits(32) spsr = GetPSRFromPSTATE(AArch32_NonDebugState);
    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(M32_Monitor);
    SPSR[] = spsr;
    R[14] = preferred_exception_return + lr_offset;
    PSTATE.T = SCTLR.TE;                        // PSTATE.J is RES0
    PSTATE.SS = '0';
    ShouldAdvanceSS = FALSE;
    PSTATE.<A,I,F> = '111';
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() then
        if !from_secure then
            PSTATE.PAN = '0';
        elsif SCTLR.SPAN == '0' then
            PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = SCTLR.DSSBS;
    BranchTo(MVBAR<31:5>:vect_offset<4:0>, BranchType_EXCEPTION);
    EndOfInstruction();
// AArch32.CheckAdvSIMDOrFPEnabled()
// =================================
// Check against CPACR, FPEXC, HCPTR, NSACR, and CPTR_EL3.
AArch32.CheckAdvSIMDOrFPEnabled(boolean fpexc_check, boolean advsimd)
    if PSTATE.EL == EL0 && (!HaveEL(EL2) || (!ELUsingAArch32(EL2) && HCR_EL2.TGE == '0')) &&
       !ELUsingAArch32(EL1) then
        // The PE behaves as if FPEXC.EN is 1
        AArch64.CheckFPAdvSIMDEnabled();
    elsif PSTATE.EL == EL0 && HaveEL(EL2) && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1' &&
          !ELUsingAArch32(EL1) then
        if fpexc_check && HCR_EL2.RW == '0' then
            fpexc_en = bits(1) IMPLEMENTATION_DEFINED "FPEXC.EN value when TGE==1 and RW==0";
            if fpexc_en == '0' then UNDEFINED;
        AArch64.CheckFPAdvSIMDEnabled();
    else
        cpacr_asedis = CPACR.ASEDIS;
        cpacr_cp10 = CPACR.cp10;

        if HaveEL(EL3) && ELUsingAArch32(EL3) && !IsSecure() then
            // Check if access disabled in NSACR
            if NSACR.NSASEDIS == '1' then cpacr_asedis = '1';
            if NSACR.cp10 == '0' then cpacr_cp10 = '00';

        if PSTATE.EL != EL2 then
            // Check if Advanced SIMD disabled in CPACR
            if advsimd && cpacr_asedis == '1' then UNDEFINED;

            // Check if access disabled in CPACR
            case cpacr_cp10 of
                when '00' disabled = TRUE;
                when '01' disabled = PSTATE.EL == EL0;
                when '10' disabled = ConstrainUnpredictableBool(Unpredictable_RESCPACR);
                when '11' disabled = FALSE;
            if disabled then UNDEFINED;

        // If required, check FPEXC enabled bit.
        if fpexc_check && FPEXC.EN == '0' then UNDEFINED;

        AArch32.CheckFPAdvSIMDTrap(advsimd);    // Also check against HCPTR and CPTR_EL3
// AArch32.CheckFPAdvSIMDTrap()
// ============================
// Check against CPTR_EL2 and CPTR_EL3.
AArch32.CheckFPAdvSIMDTrap(boolean advsimd)
    if EL2Enabled() && !ELUsingAArch32(EL2) then
        AArch64.CheckFPAdvSIMDTrap();
    else
        if HaveEL(EL2) && !IsSecure() then
            hcptr_tase = HCPTR.TASE;
            hcptr_cp10 = HCPTR.TCP10;

            if HaveEL(EL3) && ELUsingAArch32(EL3) && !IsSecure() then
                // Check if access disabled in NSACR
                if NSACR.NSASEDIS == '1' then hcptr_tase = '1';
                if NSACR.cp10 == '0' then hcptr_cp10 = '1';

            // Check if access disabled in HCPTR
            if (advsimd && hcptr_tase == '1') || hcptr_cp10 == '1' then
                exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
                exception.syndrome<24:20> = ConditionSyndrome();
                if advsimd then
                    exception.syndrome<5> = '1';
                else
                    exception.syndrome<5> = '0';
                exception.syndrome<3:0> = '1010';    // coproc field, always 0xA
                if PSTATE.EL == EL2 then
                    AArch32.TakeUndefInstrException(exception);
                else
                    AArch32.TakeHypTrapException(exception);

        if HaveEL(EL3) && !ELUsingAArch32(EL3) then
            // Check if access disabled in CPTR_EL3
            if CPTR_EL3.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL3);
    return;
// AArch32.CheckForSMCUndefOrTrap()
// ================================
// Check for UNDEFINED or trap on SMC instruction
AArch32.CheckForSMCUndefOrTrap()

    if !HaveEL(EL3) || PSTATE.EL == EL0 then UNDEFINED;

    if EL2Enabled() && !ELUsingAArch32(EL2) then
        AArch64.CheckForSMCUndefOrTrap(Zeros(16));
    else
        route_to_hyp = HaveEL(EL2) && !IsSecure() && PSTATE.EL == EL1 && HCR.TSC == '1';
        if route_to_hyp then
            exception = ExceptionSyndrome(Exception_MonitorCall);
            AArch32.TakeHypTrapException(exception);
// AArch32.CheckForSVCTrap()
// =========================
// Check for trap on SVC instruction
AArch32.CheckForSVCTrap(bits(16) immediate)

    if HaveFGTExt() then
        route_to_el2 = FALSE;
        if PSTATE.EL == EL0 then
            route_to_el2 = (!ELUsingAArch32(EL1) && EL2Enabled() && HFGITR_EL2.SVC_EL0 == '1' &&
                            (HCR_EL2.<E2H, TGE> != '11' &&
                             (!HaveEL(EL3) || SCR_EL3.FGTEn == '1')));

        if route_to_el2 then
            exception = ExceptionSyndrome(Exception_SupervisorCall);
            exception.syndrome<15:0> = immediate;
            bits(64) preferred_exception_return = ThisInstrAddr();
            vect_offset = 0x0;
            AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
// AArch32.CheckForWFxTrap()
// =========================
// Check for trap on WFE or WFI instruction
AArch32.CheckForWFxTrap(bits(2) target_el, WFxType wfxtype)
    assert HaveEL(target_el);

    // Check for routing to AArch64
    if !ELUsingAArch32(target_el) then
        AArch64.CheckForWFxTrap(target_el, wfxtype);
        return;
    boolean is_wfe = wfxtype IN {WFxType_WFE, WFxType_WFET};
    case target_el of
        when EL1
            trap = (if is_wfe then SCTLR.nTWE else SCTLR.nTWI) == '0';
        when EL2
            trap = (if is_wfe then HCR.TWE else HCR.TWI) == '1';
        when EL3
            trap = (if is_wfe then SCR.TWE else SCR.TWI) == '1';
    if trap then
        if target_el == EL1 && EL2Enabled() && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1' then
            AArch64.WFxTrap(wfxtype, target_el);
        if target_el == EL3 then
            AArch32.TakeMonitorTrapException();
        elsif target_el == EL2 then
            exception = ExceptionSyndrome(Exception_WFxTrap);
            exception.syndrome<24:20> = ConditionSyndrome();
            case wfxtype of
                when WFxType_WFI
                    exception.syndrome<0> = '0';
                when WFxType_WFE
                    exception.syndrome<0> = '1';
            AArch32.TakeHypTrapException(exception);
        else
            AArch32.TakeUndefInstrException();
// AArch32.CheckITEnabled()
// ========================
// Check whether the T32 IT instruction is disabled.
AArch32.CheckITEnabled(bits(4) mask)
    if PSTATE.EL == EL2 then
        it_disabled = HSCTLR.ITD;
    else
        it_disabled = (if ELUsingAArch32(EL1) then SCTLR.ITD else SCTLR[].ITD);
    if it_disabled == '1' then
        if mask != '1000' then UNDEFINED;

        // Otherwise whether the IT block is allowed depends on hw1 of the next instruction.
        next_instr = AArch32.MemSingle[NextInstrAddr(), 2, AccType_IFETCH, TRUE];

        if next_instr IN {'11xxxxxxxxxxxxxx', '1011xxxxxxxxxxxx', '10100xxxxxxxxxxx',
                          '01001xxxxxxxxxxx', '010001xxx1111xxx', '010001xx1xxxx111'} then
            // It is IMPLEMENTATION DEFINED whether the Undefined Instruction exception is
            // taken on the IT instruction or the next instruction. This is not reflected in
            // the pseudocode, which always takes the exception on the IT instruction. This
            // also does not take into account cases where the next instruction is UNPREDICTABLE.
            UNDEFINED;
    return;
// AArch32.CheckIllegalState()
// ===========================
// Check PSTATE.IL bit and generate Illegal Execution state exception if set.
AArch32.CheckIllegalState()
    if AArch32.GeneralExceptionsToAArch64() then
        AArch64.CheckIllegalState();
    elsif PSTATE.IL == '1' then
        route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1';

        bits(32) preferred_exception_return = ThisInstrAddr();
        vect_offset = 0x04;

        if PSTATE.EL == EL2 || route_to_hyp then
            exception = ExceptionSyndrome(Exception_IllegalState);
            if PSTATE.EL == EL2 then
                AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
            else
                AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
        else
            AArch32.TakeUndefInstrException();
// AArch32.CheckSETENDEnabled()
// ============================
// Check whether the AArch32 SETEND instruction is disabled.
AArch32.CheckSETENDEnabled()
    if PSTATE.EL == EL2 then
        setend_disabled = HSCTLR.SED;
    else
        setend_disabled = (if ELUsingAArch32(EL1) then SCTLR.SED else SCTLR[].SED);
    if setend_disabled == '1' then
        UNDEFINED;
    return;
// AArch32.SystemAccessTrap()
// ==========================
// Trapped system register access.
AArch32.SystemAccessTrap(bits(5) mode, integer ec)
    (valid, target_el) = ELFromM32(mode);
    assert valid && HaveEL(target_el) && target_el != EL0 && UInt(target_el) >= UInt(PSTATE.EL);

    if target_el == EL2 then
        exception = AArch32.SystemAccessTrapSyndrome(ThisInstr(), ec);
        AArch32.TakeHypTrapException(exception);
    else
        AArch32.TakeUndefInstrException();
// AArch32.SystemAccessTrapSyndrome()
// ==================================
// Returns the syndrome information for traps on AArch32 MCR, MCRR, MRC, MRRC, and VMRS, VMSR instructions,
// other than traps that are due to HCPTR or CPACR.
ExceptionRecord AArch32.SystemAccessTrapSyndrome(bits(32) instr, integer ec)
    ExceptionRecord exception;
    case ec of
        when 0x0    exception = ExceptionSyndrome(Exception_Uncategorized);
        when 0x3    exception = ExceptionSyndrome(Exception_CP15RTTrap);
        when 0x4    exception = ExceptionSyndrome(Exception_CP15RRTTrap);
        when 0x5    exception = ExceptionSyndrome(Exception_CP14RTTrap);
        when 0x6    exception = ExceptionSyndrome(Exception_CP14DTTrap);
        when 0x7    exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
        when 0x8    exception = ExceptionSyndrome(Exception_FPIDTrap);
        when 0xC    exception = ExceptionSyndrome(Exception_CP14RRTTrap);
        otherwise   Unreachable();
    bits(20) iss = Zeros();

    if exception.exceptype IN {Exception_FPIDTrap, Exception_CP14RTTrap, Exception_CP15RTTrap} then
        // Trapped MRC/MCR, VMRS on FPSID
        iss<13:10> = instr<19:16>;      // CRn, Reg in case of VMRS
        iss<8:5>   = instr<15:12>;      // Rt
        iss<9>     = '0';               // RES0
        if exception.exceptype != Exception_FPIDTrap then // When trap is not for VMRS
            iss<19:17> = instr<7:5>;    // opc2
            iss<16:14> = instr<23:21>;  // opc1
            iss<4:1>   = instr<3:0>;    // CRm
        else // VMRS access
            iss<19:17> = '000';         // opc2 - Hardcoded for VMRS
            iss<16:14> = '111';         // opc1 - Hardcoded for VMRS
            iss<4:1>   = '0000';        // CRm  - Hardcoded for VMRS
    elsif exception.exceptype IN {Exception_CP14RRTTrap, Exception_AdvSIMDFPAccessTrap,
                                  Exception_CP15RRTTrap} then
        // Trapped MRRC/MCRR, VMRS/VMSR
        iss<19:16> = instr<7:4>;        // opc1
        iss<13:10> = instr<19:16>;      // Rt2
        iss<8:5>   = instr<15:12>;      // Rt
        iss<4:1>   = instr<3:0>;        // CRm
    elsif exception.exceptype == Exception_CP14DTTrap then
        // Trapped LDC/STC
        iss<19:12> = instr<7:0>;        // imm8
        iss<4>     = instr<23>;         // U
        iss<2:1>   = instr<24,21>;      // P,W
        if instr<19:16> == '1111' then  // Rn==15, LDC(Literal addressing)/STC
            iss<8:5> = bits(4) UNKNOWN;
            iss<3>   = '1';
    elsif exception.exceptype == Exception_Uncategorized then
        // Trapped for unknown reason
        iss<8:5> = instr<19:16>;        // Rn
        iss<3>   = '0';
    iss<0> = instr<20>;                 // Direction

    exception.syndrome<24:20> = ConditionSyndrome();
    exception.syndrome<19:0>  = iss;

    return exception;
// AArch32.TakeHypTrapException()
// ==============================
// Exceptions routed to Hyp mode as a Hyp Trap exception.
AArch32.TakeHypTrapException(integer ec)
    exception = AArch32.SystemAccessTrapSyndrome(ThisInstr(), ec);
    AArch32.TakeHypTrapException(exception);

// AArch32.TakeHypTrapException()
// ==============================
// Exceptions routed to Hyp mode as a Hyp Trap exception.
AArch32.TakeHypTrapException(ExceptionRecord exception)
    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2);

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x14;

    AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
// AArch32.TakeMonitorTrapException()
// ==================================
// Exceptions routed to Monitor mode as a Monitor Trap exception.
AArch32.TakeMonitorTrapException()
    assert HaveEL(EL3) && ELUsingAArch32(EL3);

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x04;
    lr_offset = if CurrentInstrSet() == InstrSet_A32 then 4 else 2;

    AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakeUndefInstrException()
// =================================
AArch32.TakeUndefInstrException()
    exception = ExceptionSyndrome(Exception_Uncategorized);
    AArch32.TakeUndefInstrException(exception);

// AArch32.TakeUndefInstrException()
// =================================
AArch32.TakeUndefInstrException(ExceptionRecord exception)

    route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1';
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x04;
    lr_offset = if CurrentInstrSet() == InstrSet_A32 then 4 else 2;

    if PSTATE.EL == EL2 then
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    elsif route_to_hyp then
        AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.EnterMode(M32_Undef, preferred_exception_return, lr_offset, vect_offset);
// AArch32.UndefinedFault()
// ========================
AArch32.UndefinedFault()

    if AArch32.GeneralExceptionsToAArch64() then AArch64.UndefinedFault();
    AArch32.TakeUndefInstrException();
// AArch32.CreateFaultRecord()
// ===========================
// Assemble a FaultRecord from its component fields. The Domain field is only
// valid for Short-descriptor (TTBCR.EAE==0) stage 1 faults of certain types.
FaultRecord AArch32.CreateFaultRecord(Fault statuscode, bits(40) ipaddress, bits(4) domain,
                                      integer level, AccType acctype, boolean write, bit extflag,
                                      bits(4) debugmoe, bits(2) errortype, boolean secondstage,
                                      boolean s2fs1walk)

    FaultRecord fault;
    fault.statuscode = statuscode;
    if (statuscode != Fault_None && PSTATE.EL != EL2 && TTBCR.EAE == '0' &&
        !secondstage && !s2fs1walk && AArch32.DomainValid(statuscode, level)) then
        fault.domain = domain;
    else
        fault.domain = bits(4) UNKNOWN;
    fault.debugmoe = debugmoe;
    fault.errortype = errortype;
    fault.ipaddress.NS = bit UNKNOWN;
    fault.ipaddress.address = ZeroExtend(ipaddress);
    fault.level = level;
    fault.acctype = acctype;
    fault.write = write;
    fault.extflag = extflag;
    fault.secondstage = secondstage;
    fault.s2fs1walk = s2fs1walk;

    return fault;
// AArch32.DomainValid()
// =====================
// Returns TRUE if the Domain is valid for a Short-descriptor translation scheme.
boolean AArch32.DomainValid(Fault statuscode, integer level)
    assert statuscode != Fault_None;

    case statuscode of
        when Fault_Domain
            return TRUE;
        when Fault_Translation, Fault_AccessFlag, Fault_SyncExternalOnWalk, Fault_SyncParityOnWalk
            return level == 2;
        otherwise
            return FALSE;
// AArch32.FaultStatusLD()
// =======================
// Creates an exception fault status value for Abort and Watchpoint exceptions taken
// to Abort mode using AArch32 and Long-descriptor format.
bits(32) AArch32.FaultStatusLD(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(32) fsr = Zeros();
    if HaveRASExt() && IsAsyncAbort(fault) then fsr<15:14> = fault.errortype;
    if d_side then
        // Cache maintenance and address translation accesses report CM and WnR set.
        if fault.acctype IN {AccType_DC, AccType_IC, AccType_AT, AccType_ATPAN} then
            fsr<13> = '1'; fsr<11> = '1';
        else
            fsr<11> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then fsr<12> = fault.extflag;
    fsr<9> = '1';                         // LPAE: Long-descriptor format in use
    fsr<5:0> = EncodeLDFSC(fault.statuscode, fault.level);

    return fsr;
// AArch32.FaultStatusSD()
// =======================
// Creates an exception fault status value for Abort and Watchpoint exceptions taken
// to Abort mode using AArch32 and Short-descriptor format.
bits(32) AArch32.FaultStatusSD(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(32) fsr = Zeros();
    if HaveRASExt() && IsAsyncAbort(fault) then fsr<15:14> = fault.errortype;
    if d_side then
        // Cache maintenance and address translation accesses report CM and WnR set.
        if fault.acctype IN {AccType_DC, AccType_IC, AccType_AT, AccType_ATPAN} then
            fsr<13> = '1'; fsr<11> = '1';
        else
            fsr<11> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then fsr<12> = fault.extflag;
    fsr<9> = '0';                         // LPAE: Short-descriptor format in use
    fsr<10,3:0> = EncodeSDFSC(fault.statuscode, fault.level);
    if d_side then
        fsr<7:4> = fault.domain;          // Domain field (data fault only)

    return fsr;
// AArch32.FaultSyndrome()
// =======================
// Creates an exception syndrome value for Abort and Watchpoint exceptions taken to
// AArch32 Hyp mode.
bits(25) AArch32.FaultSyndrome(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(25) iss = Zeros();
    if HaveRASExt() && IsAsyncAbort(fault) then
        iss<11:10> = fault.errortype;     // AET
    if d_side then
        if (IsSecondStage(fault) && !fault.s2fs1walk &&
            (!IsExternalSyncAbort(fault) ||
             (!HaveRASExt() && fault.acctype == AccType_TTW &&
              boolean IMPLEMENTATION_DEFINED "ISV on second stage translation table walk"))) then
            iss<24:14> = LSInstructionSyndrome();
        if fault.acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_IC, AccType_AT, AccType_ATPAN} then
            iss<8> = '1'; iss<6> = '1';
        else
            iss<6> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then iss<9> = fault.extflag;
    iss<7> = if fault.s2fs1walk then '1' else '0';
    iss<5:0> = EncodeLDFSC(fault.statuscode, fault.level);

    return iss;
// EncodeSDFSC()
// =============
// Function that gives the Short-descriptor FSR code for different types of Fault
bits(5) EncodeSDFSC(Fault statuscode, integer level)

    bits(5) result;
    case statuscode of
        when Fault_AccessFlag
            assert level IN {1,2};
            result = if level == 1 then '00011' else '00110';
        when Fault_Alignment
            result = '00001';
        when Fault_Permission
            assert level IN {1,2};
            result = if level == 1 then '01101' else '01111';
        when Fault_Domain
            assert level IN {1,2};
            result = if level == 1 then '01001' else '01011';
        when Fault_Translation
            assert level IN {1,2};
            result = if level == 1 then '00101' else '00111';
        when Fault_SyncExternal
            result = '01000';
        when Fault_SyncExternalOnWalk
            assert level IN {1,2};
            result = if level == 1 then '01100' else '01110';
        when Fault_SyncParity
            result = '11001';
        when Fault_SyncParityOnWalk
            assert level IN {1,2};
            result = if level == 1 then '11100' else '11110';
        when Fault_AsyncParity
            result = '11000';
        when Fault_AsyncExternal
            result = '10110';
        when Fault_Debug
            result = '00010';
        when Fault_TLBConflict
            result = '10000';
        when Fault_Lockdown
            result = '10100';   // IMPLEMENTATION DEFINED
        when Fault_Exclusive
            result = '10101';   // IMPLEMENTATION DEFINED
        when Fault_ICacheMaint
            result = '00100';
        otherwise
            Unreachable();

    return result;
// A32ExpandImm()
// ==============
// Expand an A32 modified-immediate constant, discarding the carry result.

bits(32) A32ExpandImm(bits(12) imm12)

    // PSTATE.C argument to following function call does not affect the imm32 result.
    (imm32, -) = A32ExpandImm_C(imm12, PSTATE.C);

    return imm32;
// A32ExpandImm_C()
// ================
// Expand an A32 modified-immediate constant: rotate the low byte right by
// twice the value of the top four bits, also producing the shifter carry-out.

(bits(32), bit) A32ExpandImm_C(bits(12) imm12, bit carry_in)

    unrotated_value = ZeroExtend(imm12<7:0>, 32);
    (imm32, carry_out) = Shift_C(unrotated_value, SRType_ROR, 2*UInt(imm12<11:8>), carry_in);

    return (imm32, carry_out);
// DecodeImmShift()
// ================
// Decode an immediate shift type and amount; an imm5 of zero selects the
// special encodings (LSR/ASR by 32, ROR becomes RRX by 1).

(SRType, integer) DecodeImmShift(bits(2) srtype, bits(5) imm5)

    case srtype of
        when '00'
            shift_t = SRType_LSL;  shift_n = UInt(imm5);
        when '01'
            shift_t = SRType_LSR;  shift_n = if imm5 == '00000' then 32 else UInt(imm5);
        when '10'
            shift_t = SRType_ASR;  shift_n = if imm5 == '00000' then 32 else UInt(imm5);
        when '11'
            if imm5 == '00000' then
                shift_t = SRType_RRX;  shift_n = 1;
            else
                shift_t = SRType_ROR;  shift_n = UInt(imm5);

    return (shift_t, shift_n);
// DecodeRegShift()
// ================
// Decode a register-controlled shift type.

SRType DecodeRegShift(bits(2) srtype)

    case srtype of
        when '00'  shift_t = SRType_LSL;
        when '01'  shift_t = SRType_LSR;
        when '10'  shift_t = SRType_ASR;
        when '11'  shift_t = SRType_ROR;

    return shift_t;
// RRX()
// =====
// Rotate right with extend, discarding the carry-out.

bits(N) RRX(bits(N) x, bit carry_in)

    (result, -) = RRX_C(x, carry_in);

    return result;
// RRX_C()
// =======
// Rotate right with extend: carry_in shifts into the top bit and the old
// bottom bit becomes the carry-out.

(bits(N), bit) RRX_C(bits(N) x, bit carry_in)

    result    = carry_in : x<N-1:1>;
    carry_out = x<0>;

    return (result, carry_out);
// SRType
// ======
// Standard shift/rotate types used by the A32/T32 shift operations.
enumeration SRType {SRType_LSL, SRType_LSR, SRType_ASR, SRType_ROR, SRType_RRX};
// Shift()
// =======
// Perform a shift of the given type and amount, discarding the carry-out.

bits(N) Shift(bits(N) value, SRType srtype, integer amount, bit carry_in)

    (result, -) = Shift_C(value, srtype, amount, carry_in);

    return result;
// Shift_C()
// =========
// Perform a shift of the given type and amount, also returning the carry-out.
// An amount of zero passes the value and carry through unchanged.

(bits(N), bit) Shift_C(bits(N) value, SRType srtype, integer amount, bit carry_in)

    assert !(srtype == SRType_RRX && amount != 1);

    if amount == 0 then
        (result, carry_out) = (value, carry_in);
    else
        case srtype of
            when SRType_LSL
                (result, carry_out) = LSL_C(value, amount);
            when SRType_LSR
                (result, carry_out) = LSR_C(value, amount);
            when SRType_ASR
                (result, carry_out) = ASR_C(value, amount);
            when SRType_ROR
                (result, carry_out) = ROR_C(value, amount);
            when SRType_RRX
                (result, carry_out) = RRX_C(value, carry_in);

    return (result, carry_out);
// T32ExpandImm()
// ==============
// Expand a T32 modified-immediate constant, discarding the carry result.

bits(32) T32ExpandImm(bits(12) imm12)

    // PSTATE.C argument to following function call does not affect the imm32 result.
    (imm32, -) = T32ExpandImm_C(imm12, PSTATE.C);

    return imm32;
// T32ExpandImm_C()
// ================
// Expand a T32 modified-immediate constant: either a replicated byte pattern
// (top two bits zero) or a rotated value with an implied leading '1'.

(bits(32), bit) T32ExpandImm_C(bits(12) imm12, bit carry_in)

    if imm12<11:10> == '00' then
        case imm12<9:8> of
            when '00'
                imm32 = ZeroExtend(imm12<7:0>, 32);
            when '01'
                imm32 = '00000000' : imm12<7:0> : '00000000' : imm12<7:0>;
            when '10'
                imm32 = imm12<7:0> : '00000000' : imm12<7:0> : '00000000';
            when '11'
                imm32 = imm12<7:0> : imm12<7:0> : imm12<7:0> : imm12<7:0>;
        carry_out = carry_in;
    else
        unrotated_value = ZeroExtend('1':imm12<6:0>, 32);
        (imm32, carry_out) = ROR_C(unrotated_value, UInt(imm12<11:7>));

    return (imm32, carry_out);
// AArch32.CheckCP15InstrCoarseTraps()
// ===================================
// Check for coarse-grained CP15 traps in HSTR and HCR.

boolean AArch32.CheckCP15InstrCoarseTraps(integer CRn, integer nreg, integer CRm)

    // Check for coarse-grained Hyp traps
    if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
        if PSTATE.EL == EL0 && !ELUsingAArch32(EL2) then
            return AArch64.CheckCP15InstrCoarseTraps(CRn, nreg, CRm);

        // Check for MCR, MRC, MCRR and MRRC disabled by HSTR<CRn/CRm>
        major = if nreg == 1 then CRn else CRm;
        if !(major IN {4,14}) && HSTR<major> == '1' then
            return TRUE;

        // Check for MRC and MCR disabled by HCR.TIDCP
        if (HCR.TIDCP == '1' && nreg == 1 &&
            ((CRn == 9  && CRm IN {0,1,2, 5,6,7,8 })  ||
             (CRn == 10 && CRm IN {0,1, 4, 8 })       ||
             (CRn == 11 && CRm IN {0,1,2,3,4,5,6,7,8,15}))) then
            return TRUE;

    return FALSE;
// AArch32.ExclusiveMonitorsPass()
// ===============================
// Return TRUE if the Exclusives monitors for the current PE include all of the addresses
// associated with the virtual address region of size bytes starting at address.
// The immediately following memory write must be to the same addresses.

boolean AArch32.ExclusiveMonitorsPass(bits(32) address, integer size)

    // It is IMPLEMENTATION DEFINED whether the detection of memory aborts happens
    // before or after the check on the local Exclusives monitor. As a result a failure
    // of the local monitor can occur on some implementations even if the memory
    // access would give an memory abort.

    acctype = AccType_ATOMIC;
    iswrite = TRUE;
    aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);
    passed  = AArch32.IsExclusiveVA(address, ProcessorID(), size);
    if !passed then
        return FALSE;

    memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch32.Abort(address, memaddrdesc.fault);

    passed = IsExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);
    ClearExclusiveLocal(ProcessorID());

    if passed then
        if memaddrdesc.memattrs.shareable then
            passed = IsExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);

    return passed;
// An optional IMPLEMENTATION DEFINED test for an exclusive access to a virtual
// address region of size bytes starting at address.
//
// It is permitted (but not required) for this function to return FALSE and
// cause a store exclusive to fail if the virtual address region is not
// totally included within the region recorded by MarkExclusiveVA().
//
// It is always safe to return TRUE which will check the physical address only.
boolean AArch32.IsExclusiveVA(bits(32) address, integer processorid, integer size);
// Optionally record an exclusive access to the virtual address region of size bytes
// starting at address for processorid.
AArch32.MarkExclusiveVA(bits(32) address, integer processorid, integer size);
// AArch32.SetExclusiveMonitors()
// ==============================
// Sets the Exclusives monitors for the current PE to record the addresses associated
// with the virtual address region of size bytes starting at address.

AArch32.SetExclusiveMonitors(bits(32) address, integer size)

    acctype = AccType_ATOMIC;
    iswrite = FALSE;
    aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);

    memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        return;

    if memaddrdesc.memattrs.shareable then
        MarkExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);

    MarkExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);

    AArch32.MarkExclusiveVA(address, ProcessorID(), size);
// CheckAdvSIMDEnabled()
// =====================
// Check that Advanced SIMD access is permitted, then snapshot the D registers.

CheckAdvSIMDEnabled()

    fpexc_check = TRUE;
    advsimd     = TRUE;

    AArch32.CheckAdvSIMDOrFPEnabled(fpexc_check, advsimd);
    // Return from CheckAdvSIMDOrFPEnabled() occurs only if Advanced SIMD access is permitted

    // Make temporary copy of D registers
    // _Dclone[] is used as input data for instruction pseudocode
    for i = 0 to 31
        _Dclone[i] = D[i];

    return;
// CheckAdvSIMDOrVFPEnabled()
// ==========================

CheckAdvSIMDOrVFPEnabled(boolean include_fpexc_check, boolean advsimd)

    AArch32.CheckAdvSIMDOrFPEnabled(include_fpexc_check, advsimd);
    // Return from CheckAdvSIMDOrFPEnabled() occurs only if VFP access is permitted
    return;
// CheckCryptoEnabled32()
// ======================

CheckCryptoEnabled32()

    CheckAdvSIMDEnabled();
    // Return from CheckAdvSIMDEnabled() occurs only if access is permitted
    return;
// CheckVFPEnabled()
// =================

CheckVFPEnabled(boolean include_fpexc_check)

    advsimd = FALSE;
    AArch32.CheckAdvSIMDOrFPEnabled(include_fpexc_check, advsimd);
    // Return from CheckAdvSIMDOrFPEnabled() occurs only if VFP access is permitted
    return;
// FPHalvedSub()
// =============
// Compute (op1 - op2) / 2 with the usual NaN, infinity and zero special cases.

bits(N) FPHalvedSub(bits(N) op1, bits(N) op2, FPCRType fpcr)

    assert N IN {16,32,64};
    rounding = FPRoundingMode(fpcr);
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);

    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1  = (type1 == FPType_Infinity);  inf2  = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);      zero2 = (type2 == FPType_Zero);
        if inf1 && inf2 && sign1 == sign2 then
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
        elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '1') then
            result = FPInfinity('0');
        elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '0') then
            result = FPInfinity('1');
        elsif zero1 && zero2 && sign1 != sign2 then
            result = FPZero(sign1);
        else
            result_value = (value1 - value2) / 2.0;
            if result_value == 0.0 then
                // Sign of exact zero result depends on rounding mode
                result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
                result = FPZero(result_sign);
            else
                result = FPRound(result_value, fpcr);

    return result;
// FPRSqrtStep()
// =============
// Newton-Raphson reciprocal square root step: (3 - op1*op2) / 2,
// with infinity*zero treated as zero.

bits(N) FPRSqrtStep(bits(N) op1, bits(N) op2)

    assert N IN {16,32};
    FPCRType fpcr = StandardFPSCRValue();
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);

    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1  = (type1 == FPType_Infinity);  inf2  = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);      zero2 = (type2 == FPType_Zero);
        bits(N) product;
        if (inf1 && zero2) || (zero1 && inf2) then
            product = FPZero('0');
        else
            product = FPMul(op1, op2, fpcr);
        bits(N) three = FPThree('0');
        result = FPHalvedSub(three, product, fpcr);

    return result;
// FPRecipStep()
// =============
// Newton-Raphson reciprocal step: 2 - op1*op2, with infinity*zero treated as zero.

bits(N) FPRecipStep(bits(N) op1, bits(N) op2)

    assert N IN {16,32};
    FPCRType fpcr = StandardFPSCRValue();
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);

    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1  = (type1 == FPType_Infinity);  inf2  = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);      zero2 = (type2 == FPType_Zero);
        bits(N) product;
        if (inf1 && zero2) || (zero1 && inf2) then
            product = FPZero('0');
        else
            product = FPMul(op1, op2, fpcr);
        bits(N) two = FPTwo('0');
        result = FPSub(two, product, fpcr);

    return result;
// StandardFPSCRValue()
// ====================
// Return the effective FPSCR value for "standard" floating-point arithmetic:
// only the AHP and FZ16 controls are taken from FPSCR; all other controls fixed.

FPCRType StandardFPSCRValue()

    bits(32) upper = '00000000000000000000000000000000';
    bits(32) lower = '00000' : FPSCR.AHP : '110000' : FPSCR.FZ16 : '0000000000000000000';

    return upper : lower;
// AArch32.CheckAlignment()
// ========================
// Return TRUE if 'address' is aligned to 'alignment' bytes; generate an
// Alignment fault when the access type (or SCTLR.A) requires alignment and
// the address is unaligned.

boolean AArch32.CheckAlignment(bits(32) address, integer alignment, AccType acctype,
                               boolean iswrite)

    if PSTATE.EL == EL0 && !ELUsingAArch32(S1TranslationRegime()) then
        A = SCTLR[].A;          //use AArch64 register, when higher Exception level is using AArch64
    elsif PSTATE.EL == EL2 then
        A = HSCTLR.A;
    else
        A = SCTLR.A;

    aligned = (address == Align(address, alignment));
    // NOTE(review): the source here was corrupted by an interleaved diff (two
    // conflicting 'ordered' assignments); AccType_ATOMICLS64 and AccType_A32LSMD
    // are reconstructed as members of the always-checked set -- confirm against
    // the defining issue of the specification.
    atomic  = acctype IN { AccType_ATOMIC, AccType_ATOMICRW, AccType_ORDEREDATOMIC,
                           AccType_ORDEREDATOMICRW, AccType_ATOMICLS64, AccType_A32LSMD };
    ordered = acctype IN { AccType_ORDERED, AccType_ORDEREDRW, AccType_LIMITEDORDERED,
                           AccType_ORDEREDATOMIC, AccType_ORDEREDATOMICRW };
    vector  = acctype == AccType_VEC;

    // AccType_VEC is used for SIMD element alignment checks only
    check = (atomic || ordered || vector || A == '1');

    if check && !aligned then
        secondstage = FALSE;
        AArch32.Abort(address, AArch32.AlignmentFault(acctype, iswrite, secondstage));

    return aligned;
// AArch32.MemSingle[] - non-assignment (read) form
// ================================================
// Perform an atomic, little-endian read of 'size' bytes.

bits(size*8) AArch32.MemSingle[bits(32) address, integer size, AccType acctype, boolean wasaligned]
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    bits(size*8) value;
    iswrite = FALSE;

    // Translate the virtual address
    memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, wasaligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch32.Abort(address, memaddrdesc.fault);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    value = _Mem[memaddrdesc, size, accdesc, FALSE];
    return value;
// AArch32.MemSingle[] - assignment (write) form
// =============================================
// Perform an atomic, little-endian write of 'size' bytes.

AArch32.MemSingle[bits(32) address, integer size, AccType acctype, boolean wasaligned] = bits(size*8) value
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    iswrite = TRUE;

    memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, wasaligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch32.Abort(address, memaddrdesc.fault);

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    // NOTE(review): this MTE tag-check sequence was reconstructed from a corrupted
    // diff (the TagCheckFault call was split across two garbled lines) -- confirm
    // argument order against the defining issue of the specification.
    if HaveMTEExt() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFault(ZeroExtend(address, 64), acctype, iswrite);
    _Mem[memaddrdesc, size, accdesc] = value;
    return;
// MemA[] - non-assignment form
// ============================
// Atomic access memory accessor, read form.

bits(8*size) MemA[bits(32) address, integer size]
    acctype = AccType_ATOMIC;
    return Mem_with_type[address, size, acctype];

// MemA[] - assignment form
// ========================
// Atomic access memory accessor, write form.

MemA[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_ATOMIC;
    Mem_with_type[address, size, acctype] = value;
    return;
// MemO[] - non-assignment form
// ============================
// Ordered access memory accessor, read form.

bits(8*size) MemO[bits(32) address, integer size]
    acctype = AccType_ORDERED;
    return Mem_with_type[address, size, acctype];

// MemO[] - assignment form
// ========================
// Ordered access memory accessor, write form.

MemO[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_ORDERED;
    Mem_with_type[address, size, acctype] = value;
    return;
// MemS[] - non-assignment form
// ============================
// Memory accessor for streaming load multiple instructions

bits(8*size) MemS[bits(32) address, integer size]
    acctype = AccType_A32LSMD;
    return Mem_with_type[address, size, acctype];

// MemS[] - assignment form
// ========================
// Memory accessor for streaming store multiple instructions

MemS[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_A32LSMD;
    Mem_with_type[address, size, acctype] = value;
    return;
// MemU[] - non-assignment form
// ============================
// Normal (unprivileged-checked by current EL) memory accessor, read form.

bits(8*size) MemU[bits(32) address, integer size]
    acctype = AccType_NORMAL;
    return Mem_with_type[address, size, acctype];

// MemU[] - assignment form
// ========================
// Normal memory accessor, write form.

MemU[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_NORMAL;
    Mem_with_type[address, size, acctype] = value;
    return;
// MemU_unpriv[] - non-assignment form
// ===================================
// Unprivileged memory accessor, read form.

bits(8*size) MemU_unpriv[bits(32) address, integer size]
    acctype = AccType_UNPRIV;
    return Mem_with_type[address, size, acctype];

// MemU_unpriv[] - assignment form
// ===============================
// Unprivileged memory accessor, write form.

MemU_unpriv[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_UNPRIV;
    Mem_with_type[address, size, acctype] = value;
    return;

// Mem_with_type[] - non-assignment (read) form
// ============================================
// Perform a read of 'size' bytes. The access byte order is reversed for a big-endian access.
// Instruction fetches would call AArch32.MemSingle directly.

bits(size*8) Mem_with_type[bits(32) address, integer size, AccType acctype]
    assert size IN {1, 2, 4, 8, 16};
    bits(size*8) value;
    boolean iswrite = FALSE;

    aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);
    if !aligned then
        assert size > 1;
        value<7:0> = AArch32.MemSingle[address, 1, acctype, aligned];

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
        assert c IN {Constraint_FAULT, Constraint_NONE};
        if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            value<8*i+7:8*i> = AArch32.MemSingle[address+i, 1, acctype, aligned];
    else
        value = AArch32.MemSingle[address, size, acctype, aligned];

    if BigEndian(acctype) then
        value = BigEndianReverse(value);
    return value;

// Mem_with_type[] - assignment (write) form
// =========================================
// Perform a write of 'size' bytes. The byte order is reversed for a big-endian access.

Mem_with_type[bits(32) address, integer size, AccType acctype] = bits(size*8) value
    boolean iswrite = TRUE;

    if BigEndian(acctype) then
        value = BigEndianReverse(value);

    aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);
    if !aligned then
        assert size > 1;
        AArch32.MemSingle[address, 1, acctype, aligned] = value<7:0>;

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
        assert c IN {Constraint_FAULT, Constraint_NONE};
        if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            AArch32.MemSingle[address+i, 1, acctype, aligned] = value<8*i+7:8*i>;
    else
        AArch32.MemSingle[address, size, acctype, aligned] = value;
    return;
// AArch32.ESBOperation()
// ======================
// Perform the AArch32 ESB operation for ESB executed in AArch32 state

AArch32.ESBOperation()

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = HCR_EL2.TGE == '1' || HCR_EL2.AMO == '1';
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.EA == '1';

    if route_to_aarch64 then
        AArch64.ESBOperation();
        return;

    route_to_monitor = HaveEL(EL3) && ELUsingAArch32(EL3) && SCR.EA == '1';
    route_to_hyp = PSTATE.EL IN {EL0, EL1} && EL2Enabled() && (HCR.TGE == '1' || HCR.AMO == '1');

    if route_to_monitor then
        target = M32_Monitor;
    elsif route_to_hyp || PSTATE.M == M32_Hyp then
        target = M32_Hyp;
    else
        target = M32_Abort;

    if IsSecure() then
        mask_active = TRUE;
    elsif target == M32_Monitor then
        mask_active = SCR.AW == '1' && (!HaveEL(EL2) || (HCR.TGE == '0' && HCR.AMO == '0'));
    else
        mask_active = target == M32_Abort || PSTATE.M == M32_Hyp;

    mask_set = PSTATE.A == '1';
    (-, el)  = ELFromM32(target);
    intdis   = Halted() || ExternalDebugInterruptsDisabled(el);
    masked   = intdis || (mask_active && mask_set);

    // Check for a masked Physical SError pending that can be synchronized
    // by an Error synchronization event.
    if masked && IsSynchronizablePhysicalSErrorPending() then
        syndrome32 = AArch32.PhysicalSErrorSyndrome();
        DISR = AArch32.ReportDeferredSError(syndrome32.AET, syndrome32.ExT);
        ClearPendingPhysicalSError();

    return;
// Return the SError syndrome
AArch32.SErrorSyndrome AArch32.PhysicalSErrorSyndrome();
// AArch32.ReportDeferredSError()
// ==============================
// Return deferred SError syndrome

bits(32) AArch32.ReportDeferredSError(bits(2) AET, bit ExT)
    bits(32) target;
    target<31> = '1';                       // A
    syndrome = Zeros(16);
    if PSTATE.EL == EL2 then
        syndrome<11:10> = AET;              // AET
        syndrome<9>     = ExT;              // EA
        syndrome<5:0>   = '010001';         // DFSC
    else
        syndrome<15:14> = AET;              // AET
        syndrome<12>    = ExT;              // ExT
        syndrome<9>     = TTBCR.EAE;        // LPAE
        if TTBCR.EAE == '1' then            // Long-descriptor format
            syndrome<5:0> = '010001';       // STATUS
        else                                // Short-descriptor format
            syndrome<10,3:0> = '10110';     // FS
    if HaveAnyAArch64() then
        target<24:0> = ZeroExtend(syndrome);// Any RES0 fields must be set to zero
    else
        target<15:0> = syndrome;
    return target;
// AArch32.SErrorSyndrome
// ======================
// Syndrome information for a deferred SError: asynchronous error type (AET)
// and external abort type (ExT).
type AArch32.SErrorSyndrome is (
    bits(2) AET,
    bit ExT
)
// AArch32.vESBOperation()
// =======================
// Perform the ESB operation for virtual SError interrupts executed in AArch32 state

AArch32.vESBOperation()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();

    // Check for EL2 using AArch64 state
    if !ELUsingAArch32(EL2) then
        AArch64.vESBOperation();
        return;

    // If physical SError interrupts are routed to Hyp mode, and TGE is not set, then a
    // virtual SError interrupt might be pending
    vSEI_enabled = HCR.TGE == '0' && HCR.AMO == '1';
    vSEI_pending = vSEI_enabled && HCR.VA == '1';
    vintdis      = Halted() || ExternalDebugInterruptsDisabled(EL1);
    vmasked      = vintdis || PSTATE.A == '1';

    // Check for a masked virtual SError pending
    if vSEI_pending && vmasked then
        VDISR = AArch32.ReportDeferredSError(VDFSR<15:14>, VDFSR<12>);
        HCR.VA = '0';                       // Clear pending virtual SError

    return;
// AArch32.ResetGeneralRegisters()
// ===============================
// Set the AArch32 general-purpose registers (including all banked copies)
// to architecturally UNKNOWN values at reset.

AArch32.ResetGeneralRegisters()

    for i = 0 to 7
        R[i] = bits(32) UNKNOWN;
    for i = 8 to 12
        Rmode[i, M32_User] = bits(32) UNKNOWN;
        Rmode[i, M32_FIQ] = bits(32) UNKNOWN;
    if HaveEL(EL2) then Rmode[13, M32_Hyp] = bits(32) UNKNOWN;     // No R14_hyp
    for i = 13 to 14
        Rmode[i, M32_User] = bits(32) UNKNOWN;
        Rmode[i, M32_FIQ] = bits(32) UNKNOWN;
        Rmode[i, M32_IRQ] = bits(32) UNKNOWN;
        Rmode[i, M32_Svc] = bits(32) UNKNOWN;
        Rmode[i, M32_Abort] = bits(32) UNKNOWN;
        Rmode[i, M32_Undef] = bits(32) UNKNOWN;
        if HaveEL(EL3) then Rmode[i, M32_Monitor] = bits(32) UNKNOWN;

    return;
// AArch32.ResetSIMDFPRegisters()
// ==============================
// Set the Advanced SIMD and floating-point registers to architecturally
// UNKNOWN values at reset.

AArch32.ResetSIMDFPRegisters()

    for i = 0 to 15
        Q[i] = bits(128) UNKNOWN;

    return;
// AArch32.ResetSpecialRegisters()
// ===============================
// Set the AArch32 special registers (banked SPSRs, ELR_hyp) and the external
// debug special registers to architecturally UNKNOWN values at reset.

AArch32.ResetSpecialRegisters()

    // AArch32 special registers
    SPSR_fiq<31:0> = bits(32) UNKNOWN;
    SPSR_irq<31:0> = bits(32) UNKNOWN;
    SPSR_svc<31:0> = bits(32) UNKNOWN;
    SPSR_abt<31:0> = bits(32) UNKNOWN;
    SPSR_und<31:0> = bits(32) UNKNOWN;
    if HaveEL(EL2) then
        SPSR_hyp = bits(32) UNKNOWN;
        ELR_hyp = bits(32) UNKNOWN;
    if HaveEL(EL3) then
        SPSR_mon = bits(32) UNKNOWN;

    // External debug special registers
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;

    return;
// Declaration only: resets the AArch32 System registers; the behavior (and the
// effect of 'cold_reset') is defined elsewhere in the specification.
AArch32.ResetSystemRegisters(boolean cold_reset);
// ALUExceptionReturn()
// ====================
// Handle an ALU instruction writing the PC with S set: an exception return,
// UNDEFINED at EL2, and CONSTRAINED UNPREDICTABLE in User/System modes.

ALUExceptionReturn(bits(32) address)
    if PSTATE.EL == EL2 then
        UNDEFINED;
    elsif PSTATE.M IN {M32_User,M32_System} then
        Constraint c = ConstrainUnpredictable(Unpredictable_ALUEXCEPTIONRETURN);
        assert c IN {Constraint_UNDEF, Constraint_NOP};
        case c of
            when Constraint_UNDEF
                UNDEFINED;
            when Constraint_NOP
                EndOfInstruction();
    else
        AArch32.ExceptionReturn(address, SPSR[]);
// ALUWritePC()
// ============
// Write the PC from an ALU result: interworking branch in A32 state,
// simple branch in T32 state.

ALUWritePC(bits(32) address)
    if CurrentInstrSet() == InstrSet_A32 then
        BXWritePC(address, BranchType_INDIR);
    else
        BranchWritePC(address, BranchType_INDIR);
// BXWritePC()
// ===========
// Interworking branch: bit<0> of the target selects the instruction set.

BXWritePC(bits(32) address, BranchType branch_type)
    if address<0> == '1' then
        SelectInstrSet(InstrSet_T32);
        address<0> = '0';
    else
        SelectInstrSet(InstrSet_A32);
        // For branches to an unaligned PC counter in A32 state, the processor takes the branch
        // and does one of:
        // * Forces the address to be aligned
        // * Leaves the PC unaligned, meaning the target generates a PC Alignment fault.
        if address<1> == '1' && ConstrainUnpredictableBool(Unpredictable_A32FORCEALIGNPC) then
            address<1> = '0';
    BranchTo(address, branch_type);
// BranchWritePC()
// ===============
// Simple (non-interworking) branch: force the target alignment required by
// the current instruction set.

BranchWritePC(bits(32) address, BranchType branch_type)
    if CurrentInstrSet() == InstrSet_A32 then
        address<1:0> = '00';
    else
        address<0> = '0';
    BranchTo(address, branch_type);
// BranchWritePC()
// ===============
// Write 'address' to the PC as a simple branch, forcing the alignment
// required by the current instruction set state.

BranchWritePC(bits(32) address, BranchType branch_type)
    if CurrentInstrSet() == InstrSet_A32 then
        address<1:0> = '00';
    else
        address<0> = '0';
    BranchTo(address, branch_type);
// D[] - non-assignment form
// =========================
// Read 64-bit SIMD&FP register Dn as half of the 128-bit register V[n DIV 2].

bits(64) D[integer n]
    assert n >= 0 && n <= 31;
    base = (n MOD 2) * 64;
    bits(128) vreg = V[n DIV 2];
    return vreg<base+63:base>;

// D[] - assignment form
// =====================
// Write 64-bit SIMD&FP register Dn, preserving the other half of V[n DIV 2].

D[integer n] = bits(64) value
    assert n >= 0 && n <= 31;
    base = (n MOD 2) * 64;
    bits(128) vreg = V[n DIV 2];
    vreg<base+63:base> = value;
    V[n DIV 2] = vreg;
    return;
// Din[] - non-assignment form
// ===========================
// Read the pre-instruction copy of register Dn from _Dclone.

bits(64) Din[integer n]
    assert n >= 0 && n <= 31;
    return _Dclone[n];
// LR - non-assignment form
// ========================
bits(32) LR
return R[14];
// LR - assignment form
// ====================
// Write the link register, an alias of R14.

LR = bits(32) value
    R[14] = value;
    return;
// LoadWritePC()
// =============
// Write 'address' to the PC from a load instruction; an interworking branch.

LoadWritePC(bits(32) address)
    BXWritePC(address, BranchType_INDIR);
// LookUpRIndex()
// ==============
// Map architectural register number 'n' in mode 'mode' to an index into the
// banked general-purpose register file _R[].

integer LookUpRIndex(integer n, bits(5) mode)
    assert n >= 0 && n <= 14;

    case n of   // Select  index by mode:     usr fiq irq svc abt und hyp
        when 8     result = RBankSelect(mode,  8, 24,  8,  8,  8,  8,  8);
        when 9     result = RBankSelect(mode,  9, 25,  9,  9,  9,  9,  9);
        when 10    result = RBankSelect(mode, 10, 26, 10, 10, 10, 10, 10);
        when 11    result = RBankSelect(mode, 11, 27, 11, 11, 11, 11, 11);
        when 12    result = RBankSelect(mode, 12, 28, 12, 12, 12, 12, 12);
        when 13    result = RBankSelect(mode, 13, 29, 17, 19, 21, 23, 15);
        when 14    result = RBankSelect(mode, 14, 30, 16, 18, 20, 22, 14);
        otherwise  result = n;

    return result;
// Monitor mode banked copies of SP and LR, kept outside the _R[] array.
bits(32) SP_mon;
bits(32) LR_mon;
// PC - non-assignment form
// ========================
// Read the program counter, an alias of R15.

bits(32) PC
    return R[15];                  // This includes the offset from AArch32 state
// PCStoreValue()
// ==============

bits(32) PCStoreValue()
    // This function returns the PC value. On architecture versions before Armv7, it
    // is permitted to instead return PC+4, provided it does so consistently. It is
    // used only to describe A32 instructions, so it returns the address of the current
    // instruction plus 8 (normally) or 12 (when the alternative is permitted).
    return PC;
// Q[] - non-assignment form
// =========================
// Read 128-bit SIMD&FP register Qn.

bits(128) Q[integer n]
    assert n >= 0 && n <= 15;
    return V[n];

// Q[] - assignment form
// =====================
// Write 128-bit SIMD&FP register Qn.

Q[integer n] = bits(128) value
    assert n >= 0 && n <= 15;
    V[n] = value;
    return;
// Qin[] - non-assignment form
// ===========================
// Read the pre-instruction copy of register Qn, assembled from the Din[] halves.

bits(128) Qin[integer n]
    assert n >= 0 && n <= 15;
    return Din[2*n+1]:Din[2*n];
// R[] - assignment form
// =====================
// Write general-purpose register Rn for the current mode.

R[integer n] = bits(32) value
    Rmode[n, PSTATE.M] = value;
    return;
// R[] - non-assignment form
// =========================
// Read general-purpose register Rn for the current mode. Reading R15 returns
// the PC plus the instruction-set-specific offset.

bits(32) R[integer n]
    if n == 15 then
        offset = (if CurrentInstrSet() == InstrSet_A32 then 8 else 4);
        return _PC<31:0> + offset;
    else
        return Rmode[n, PSTATE.M];
// RBankSelect()
// =============
// Select one of the supplied per-mode register indices according to 'mode'.

integer RBankSelect(bits(5) mode, integer usr, integer fiq, integer irq,
                    integer svc, integer abt, integer und, integer hyp)

    case mode of
        when M32_User    result = usr;  // User mode
        when M32_FIQ     result = fiq;  // FIQ mode
        when M32_IRQ     result = irq;  // IRQ mode
        when M32_Svc     result = svc;  // Supervisor mode
        when M32_Abort   result = abt;  // Abort mode
        when M32_Hyp     result = hyp;  // Hyp mode
        when M32_Undef   result = und;  // Undefined mode
        when M32_System  result = usr;  // System mode uses User mode registers
        otherwise        Unreachable(); // Monitor mode

    return result;
// Rmode[] - non-assignment form
// =============================
// Read general-purpose register Rn as seen from mode 'mode', honouring
// the Monitor mode banked SP/LR.

bits(32) Rmode[integer n, bits(5) mode]
    assert n >= 0 && n <= 14;

    // Check for attempted use of Monitor mode in Non-secure state.
    if !IsSecure() then assert mode != M32_Monitor;
    assert !BadMode(mode);

    if mode == M32_Monitor then
        if n == 13 then return SP_mon;
        elsif n == 14 then return LR_mon;
        else return _R[n]<31:0>;
    else
        return _R[LookUpRIndex(n, mode)]<31:0>;
// Rmode[] - assignment form
// =========================
// Write general-purpose register Rn as seen from mode 'mode', honouring
// the Monitor mode banked SP/LR.

Rmode[integer n, bits(5) mode] = bits(32) value
    assert n >= 0 && n <= 14;

    // Check for attempted use of Monitor mode in Non-secure state.
    if !IsSecure() then assert mode != M32_Monitor;
    assert !BadMode(mode);

    if mode == M32_Monitor then
        if n == 13 then SP_mon = value;
        elsif n == 14 then LR_mon = value;
        else _R[n]<31:0> = value;
    else
        // It is CONSTRAINED UNPREDICTABLE whether the upper 32 bits of the X
        // register are unchanged or set to zero. This is also tested for on
        // exception entry, as this applies to all AArch32 registers.
        if !HighestELUsingAArch32() && ConstrainUnpredictableBool(Unpredictable_ZEROUPPER) then
            _R[LookUpRIndex(n, mode)] = ZeroExtend(value);
        else
            _R[LookUpRIndex(n, mode)]<31:0> = value;
    return;
// S[] - non-assignment form
// =========================
// Read 32-bit SIMD&FP register Sn as a quarter of the 128-bit register V[n DIV 4].

bits(32) S[integer n]
    assert n >= 0 && n <= 31;
    base = (n MOD 4) * 32;
    bits(128) vreg = V[n DIV 4];
    return vreg<base+31:base>;

// S[] - assignment form
// =====================
// Write 32-bit SIMD&FP register Sn, preserving the rest of V[n DIV 4].

S[integer n] = bits(32) value
    assert n >= 0 && n <= 31;
    base = (n MOD 4) * 32;
    bits(128) vreg = V[n DIV 4];
    vreg<base+31:base> = value;
    V[n DIV 4] = vreg;
    return;
// SP - assignment form
// ====================
// Write the stack pointer, an alias of R13.

SP = bits(32) value
    R[13] = value;
    return;

// SP - non-assignment form
// ========================
// Read the stack pointer, an alias of R13.

bits(32) SP
    return R[13];

// Pre-instruction copies of the D registers, read back through Din[]/Qin[].
array bits(64) _Dclone[0..31];
// AArch32.ExceptionReturn()
// =========================
// Return from an exception: restore PSTATE from 'spsr' and branch to 'new_pc'.

AArch32.ExceptionReturn(bits(32) new_pc, bits(32) spsr)
    SynchronizeContext();

    // Attempts to change to an illegal mode or state will invoke the Illegal Execution state
    // mechanism
    SetPSTATEFromPSR(spsr);
    ClearExclusiveLocal(ProcessorID());
    SendEventLocal();

    if PSTATE.IL == '1' then
        // If the exception return is illegal, PC[1:0] are UNKNOWN
        new_pc<1:0> = bits(2) UNKNOWN;
    else
        // LR[1:0] or LR[0] are treated as being 0, depending on the target instruction set state
        if PSTATE.T == '1' then
            new_pc<0> = '0';                 // T32
        else
            new_pc<1:0> = '00';              // A32

    BranchTo(new_pc, BranchType_ERET);

// AArch32.ExecutingATS1xPInstr()
// ==============================
// Return TRUE if current instruction is AT S1CPR/WP

boolean AArch32.ExecutingATS1xPInstr()
    if !HavePrivATExt() then return FALSE;

    instr = ThisInstr();
    if instr<24+:4> == '1110' && instr<8+:4> == '1111' then
        opc1 = instr<21+:3>;
        CRn  = instr<16+:4>;
        CRm  = instr<0+:4>;
        opc2 = instr<5+:3>;
        return (opc1 == '000' && CRn == '0111' && CRm == '1001' && opc2 IN {'000','001'});
    else
        return FALSE;
// AArch32.ExecutingCP10or11Instr()
// ================================
// Return TRUE if the current instruction is a CP10 or CP11 coprocessor access.

boolean AArch32.ExecutingCP10or11Instr()
    instr = ThisInstr();
    instr_set = CurrentInstrSet();
    assert instr_set IN {InstrSet_A32, InstrSet_T32};

    if instr_set == InstrSet_A32 then
        return ((instr<27:24> == '1110' || instr<27:25> == '110') && instr<11:8> == '101x');
    else // InstrSet_T32
        return (instr<31:28> == '111x' && (instr<27:24> == '1110' || instr<27:25> == '110') && instr<11:8> == '101x');
// AArch32.ExecutingLSMInstr()
// ===========================
// Returns TRUE if processor is executing a Load/Store Multiple instruction

boolean AArch32.ExecutingLSMInstr()
    instr = ThisInstr();
    instr_set = CurrentInstrSet();
    assert instr_set IN {InstrSet_A32, InstrSet_T32};

    if instr_set == InstrSet_A32 then
        return (instr<28+:4> != '1111' && instr<25+:3> == '100');
    else // InstrSet_T32
        if ThisInstrLength() == 16 then
            return (instr<12+:4> == '1100');
        else
            return (instr<25+:7> == '1110100' && instr<22> == '0');
// AArch32.ITAdvance()
// ===================
// Advance the IT block state machine after executing an instruction.

AArch32.ITAdvance()
    if PSTATE.IT<2:0> == '000' then
        PSTATE.IT = '00000000';
    else
        PSTATE.IT<4:0> = LSL(PSTATE.IT<4:0>, 1);
    return;

// Read from a 32-bit AArch32 System register and return the register's contents.
bits(32) AArch32.SysRegRead(integer cp_num, bits(32) instr);

// Read from a 64-bit AArch32 System register and return the register's contents.
bits(64) AArch32.SysRegRead64(integer cp_num, bits(32) instr);

// AArch32.SysRegReadCanWriteAPSR()
// ================================
// Determines whether the AArch32 System register read instruction can write to APSR flags.

boolean AArch32.SysRegReadCanWriteAPSR(integer cp_num, bits(32) instr)
    assert UsingAArch32();
    assert (cp_num IN {14,15});
    assert cp_num == UInt(instr<11:8>);

    opc1 = UInt(instr<23:21>);
    opc2 = UInt(instr<7:5>);
    CRn  = UInt(instr<19:16>);
    CRm  = UInt(instr<3:0>);

    if cp_num == 14 && opc1 == 0 && CRn == 0 && CRm == 1 && opc2 == 0 then // DBGDSCRint
        return TRUE;

    return FALSE;

// Write to a 32-bit AArch32 System register.
AArch32.SysRegWrite(integer cp_num, bits(32) instr, bits(32) val);
// Write to a 64-bit AArch32 System register.
AArch32.SysRegWrite64(integer cp_num, bits(32) instr, bits(64) val);

// AArch32.WriteMode()
// ===================
// Function for dealing with writes to PSTATE.M from AArch32 state only.
// This ensures that PSTATE.EL and PSTATE.SP are always valid.

AArch32.WriteMode(bits(5) mode)
    (valid,el) = ELFromM32(mode);
    assert valid;
    PSTATE.M   = mode;
    PSTATE.EL  = el;
    PSTATE.nRW = '1';
    PSTATE.SP  = (if mode IN {M32_User,M32_System} then '0' else '1');
    return;
// AArch32.WriteModeByInstr()
// ==========================
// Function for dealing with writes to PSTATE.M from an AArch32 instruction, and ensuring that
// illegal state changes are correctly flagged in PSTATE.IL.

AArch32.WriteModeByInstr(bits(5) mode)
    (valid,el) = ELFromM32(mode);

    // 'valid' is set to FALSE if 'mode' is invalid for this implementation or the current value
    // of SCR.NS/SCR_EL3.NS. Additionally, it is illegal for an instruction to write 'mode' to
    // PSTATE.EL if it would result in any of:
    // * A change to a mode that would cause entry to a higher Exception level.
    if UInt(el) > UInt(PSTATE.EL) then
        valid = FALSE;

    // * A change to or from Hyp mode.
    if (PSTATE.M == M32_Hyp || mode == M32_Hyp) && PSTATE.M != mode then
        valid = FALSE;

    // * When EL2 is implemented, the value of HCR.TGE is '1', a change to a Non-secure EL1 mode.
    if PSTATE.M == M32_Monitor && HaveEL(EL2) && el == EL1 && SCR.NS == '1' && HCR.TGE == '1' then
        valid = FALSE;

    if !valid then
        PSTATE.IL = '1';
    else
        AArch32.WriteMode(mode);
    return;
// BadMode()
// =========

boolean BadMode(bits(5) mode)
    // Return TRUE if 'mode' encodes a mode that is not valid for this implementation
    case mode of
        when M32_Monitor
            valid = HaveAArch32EL(EL3);
        when M32_Hyp
            valid = HaveAArch32EL(EL2);
        when M32_FIQ, M32_IRQ, M32_Svc, M32_Abort, M32_Undef, M32_System
            // If EL3 is implemented and using AArch32, then these modes are EL3 modes in Secure
            // state, and EL1 modes in Non-secure state. If EL3 is not implemented or is using
            // AArch64, then these modes are EL1 modes.
            // Therefore it is sufficient to test this implementation supports EL1 using AArch32.
            valid = HaveAArch32EL(EL1);
        when M32_User
            valid = HaveAArch32EL(EL0);
        otherwise
            valid = FALSE;           // Passed an illegal mode value
    return !valid;
// BankedRegisterAccessValid()
// ===========================
// Checks for MRS (Banked register) or MSR (Banked register) accesses to registers
// other than the SPSRs that are invalid. This includes ELR_hyp accesses.

BankedRegisterAccessValid(bits(5) SYSm, bits(5) mode)

    case SYSm of
        when '000xx', '00100'                          // R8_usr to R12_usr
            if mode != M32_FIQ then UNPREDICTABLE;
        when '00101'                                   // SP_usr
            if mode == M32_System then UNPREDICTABLE;
        when '00110'                                   // LR_usr
            if mode IN {M32_Hyp,M32_System} then UNPREDICTABLE;
        when '010xx', '0110x', '01110'                 // R8_fiq to R12_fiq, SP_fiq, LR_fiq
            if mode == M32_FIQ then UNPREDICTABLE;
        when '1000x'                                   // LR_irq, SP_irq
            if mode == M32_IRQ then UNPREDICTABLE;
        when '1001x'                                   // LR_svc, SP_svc
            if mode == M32_Svc then UNPREDICTABLE;
        when '1010x'                                   // LR_abt, SP_abt
            if mode == M32_Abort then UNPREDICTABLE;
        when '1011x'                                   // LR_und, SP_und
            if mode == M32_Undef then UNPREDICTABLE;
        when '1110x'                                   // LR_mon, SP_mon
            if !HaveEL(EL3) || !IsSecure() || mode == M32_Monitor then UNPREDICTABLE;
        when '11110'                                   // ELR_hyp, only from Monitor or Hyp mode
            if !HaveEL(EL2) || !(mode IN {M32_Monitor,M32_Hyp}) then UNPREDICTABLE;
        when '11111'                                   // SP_hyp, only from Monitor mode
            if !HaveEL(EL2) || mode != M32_Monitor then UNPREDICTABLE;
        otherwise
            UNPREDICTABLE;

    return;
// CPSRWriteByInstr()
// ==================
// Update PSTATE.<N,Z,C,V,Q,GE,E,A,I,F,M> from a CPSR value written by an MSR instruction.

CPSRWriteByInstr(bits(32) value, bits(4) bytemask)
    privileged = PSTATE.EL != EL0;            // PSTATE.<A,I,F,M> are not writable at EL0

    // Write PSTATE from 'value', ignoring bytes masked by 'bytemask'
    if bytemask<3> == '1' then
        PSTATE.<N,Z,C,V,Q> = value<31:27>;
        // Bits <26:24> are ignored

    if bytemask<2> == '1' then
        if HaveSSBSExt() then
            PSTATE.SSBS = value<23>;
        if privileged then
            PSTATE.PAN = value<22>;
        if HaveDITExt() then
            PSTATE.DIT = value<21>;
        // Bit <20> is RES0
        PSTATE.GE = value<19:16>;

    if bytemask<1> == '1' then
        // Bits <15:10> are RES0
        PSTATE.E = value<9>;                  // PSTATE.E is writable at EL0
        if privileged then
            PSTATE.A = value<8>;

    if bytemask<0> == '1' then
        if privileged then
            PSTATE.<I,F> = value<7:6>;
            // Bit <5> is RES0
            // AArch32.WriteModeByInstr() sets PSTATE.IL to 1 if this is an illegal mode change.
            AArch32.WriteModeByInstr(value<4:0>);
    return;
// ConditionPassed()
// =================
// Return TRUE if the current instruction passes its condition code check.

boolean ConditionPassed()
    return ConditionHolds(AArch32.CurrentCond());

// Return the condition field of the current instruction.
bits(4) AArch32.CurrentCond();
// InITBlock()
// ===========
// Return TRUE if the current T32 instruction is inside an IT block.

boolean InITBlock()
    if CurrentInstrSet() == InstrSet_T32 then
        return PSTATE.IT<3:0> != '0000';
    else
        return FALSE;
// LastInITBlock()
// ===============
// Return TRUE if the current instruction is the last one of an IT block.

boolean LastInITBlock()
    return (PSTATE.IT<3:0> == '1000');
// SPSRWriteByInstr()
// ==================
// Update the current SPSR from a value written by an MSR instruction,
// ignoring bytes masked by 'bytemask'.

SPSRWriteByInstr(bits(32) value, bits(4) bytemask)
    bits(32) new_spsr = SPSR[];

    if bytemask<3> == '1' then
        new_spsr<31:24> = value<31:24>;  // N,Z,C,V,Q flags, IT[1:0],J bits

    if bytemask<2> == '1' then
        new_spsr<23:16> = value<23:16>;  // IL bit, GE[3:0] flags

    if bytemask<1> == '1' then
        new_spsr<15:8> = value<15:8>;    // IT[7:2] bits, E bit, A interrupt mask

    if bytemask<0> == '1' then
        new_spsr<7:0> = value<7:0>;      // I,F interrupt masks, T bit, Mode bits

    SPSR[] = new_spsr;                   // UNPREDICTABLE if User or System mode
    return;
// SPSRaccessValid()
// =================
// Checks for MRS (Banked register) or MSR (Banked register) accesses to the SPSRs
// that are UNPREDICTABLE

SPSRaccessValid(bits(5) SYSm, bits(5) mode)
    case SYSm of
        when '01110'                                   // SPSR_fiq
            if mode == M32_FIQ then UNPREDICTABLE;
        when '10000'                                   // SPSR_irq
            if mode == M32_IRQ then UNPREDICTABLE;
        when '10010'                                   // SPSR_svc
            if mode == M32_Svc then UNPREDICTABLE;
        when '10100'                                   // SPSR_abt
            if mode == M32_Abort then UNPREDICTABLE;
        when '10110'                                   // SPSR_und
            if mode == M32_Undef then UNPREDICTABLE;
        when '11100'                                   // SPSR_mon
            if !HaveEL(EL3) || mode == M32_Monitor || !IsSecure() then UNPREDICTABLE;
        when '11110'                                   // SPSR_hyp
            if !HaveEL(EL2) || mode != M32_Monitor then UNPREDICTABLE;
        otherwise
            UNPREDICTABLE;

    return;
// SelectInstrSet()
// ================
// Select the AArch32 instruction set state (A32 or T32) by setting PSTATE.T.

SelectInstrSet(InstrSet iset)
    assert CurrentInstrSet() IN {InstrSet_A32, InstrSet_T32};
    assert iset IN {InstrSet_A32, InstrSet_T32};

    PSTATE.T = if iset == InstrSet_A32 then '0' else '1';

    return;
// Sat()
// =====
// Saturate 'i' to an N-bit signed or unsigned value.

bits(N) Sat(integer i, integer N, boolean unsigned)
    result = if unsigned then UnsignedSat(i, N) else SignedSat(i, N);
    return result;
// SignedSat()
// ===========
// Saturate 'i' to an N-bit signed value, discarding the saturation flag.

bits(N) SignedSat(integer i, integer N)
    (result, -) = SignedSatQ(i, N);
    return result;
// UnsignedSat()
// =============
// Saturate 'i' to an N-bit unsigned value, discarding the saturation flag.

bits(N) UnsignedSat(integer i, integer N)
    (result, -) = UnsignedSatQ(i, N);
    return result;
// AArch32.CombineS1S2Desc()
// =========================
// Combines the address descriptors from stage 1 and stage 2

AddressDescriptor AArch32.CombineS1S2Desc(AddressDescriptor s1desc, AddressDescriptor s2desc, AccType s2acctype)

    AddressDescriptor result;

    result.paddress = s2desc.paddress;

    apply_force_writeback = HaveStage2MemAttrControl() && HCR_EL2.FWB == '1';

    if IsFault(s1desc) || IsFault(s2desc) then
        result = if IsFault(s1desc) then s1desc else s2desc;
    else
        result.fault = AArch32.NoFault();
        if s2desc.memattrs.memtype == MemType_Device || (
           (apply_force_writeback && s1desc.memattrs.memtype == MemType_Device && s2desc.memattrs.inner.attrs != '10') ||
           (!apply_force_writeback && s1desc.memattrs.memtype == MemType_Device) ) then
            result.memattrs.memtype = MemType_Device;
            if s1desc.memattrs.memtype == MemType_Normal then
                result.memattrs.device = s2desc.memattrs.device;
            elsif s2desc.memattrs.memtype == MemType_Normal then
                result.memattrs.device = s1desc.memattrs.device;
            else // Both Device
                result.memattrs.device = CombineS1S2Device(s1desc.memattrs.device,
                                                           s2desc.memattrs.device);
            result.memattrs.tagged = FALSE;
        else
            // S1 can be either Normal or Device, S2 is Normal.
            result.memattrs.memtype = MemType_Normal;
            result.memattrs.device = DeviceType UNKNOWN;
            result.memattrs.inner = CombineS1S2AttrHints(s1desc.memattrs.inner, s2desc.memattrs.inner, s2acctype, s1desc.memattrs.memtype);
            result.memattrs.outer = CombineS1S2AttrHints(s1desc.memattrs.outer, s2desc.memattrs.outer, s2acctype, s1desc.memattrs.memtype);
            result.memattrs.shareable = (s1desc.memattrs.shareable || s2desc.memattrs.shareable);
            result.memattrs.outershareable = (s1desc.memattrs.outershareable ||
                                              s2desc.memattrs.outershareable);
            result.memattrs.tagged = (s1desc.memattrs.tagged &&
                                      result.memattrs.inner.attrs == MemAttr_WB &&
                                      result.memattrs.inner.hints == MemHint_RWA &&
                                      result.memattrs.outer.attrs == MemAttr_WB &&
                                      result.memattrs.outer.hints == MemHint_RWA);
        result.memattrs = MemAttrDefaults(result.memattrs);

    return result;
// AArch32.DefaultTEXDecode()
// ==========================
// AArch32.CombineS1S2Desc()
// =========================
// Combines the address descriptors from stage 1 and stage 2
MemoryAttributesAddressDescriptor AArch32.DefaultTEXDecode(bits(3) TEX, bit C, bit B, bit S,AArch32.CombineS1S2Desc( AddressDescriptor s1desc, AddressDescriptor s2desc, AccType acctype)s2acctype)
MemoryAttributesAddressDescriptor memattrs;
result;
result.paddress = s2desc.paddress;
// Reserved values map to allocated values
if (TEX == '001' && C:B == '01') || (TEX == '010' && C:B != '00') || TEX == '011' then
bits(5) texcb;
(-, texcb) = apply_force_writeback = ConstrainUnpredictableBitsHaveStage2MemAttrControl(() && HCR_EL2.FWB == '1';
ifUnpredictable_RESTEXCBIsFault);
TEX = texcb<4:2>; C = texcb<1>; B = texcb<0>;
case TEX:C:B of
when '00000'
// Device-nGnRnE
memattrs.memtype =(s1desc) || IsFault(s2desc) then
result = if IsFault(s1desc) then s1desc else s2desc;
else
result.fault = AArch32.NoFault();
if s2desc.memattrs.memtype == MemType_Device;
memattrs.device =|| (
(apply_force_writeback && s1desc.memattrs.memtype == DeviceType_nGnRnEMemType_Device;
when '00001', '01000'
// Device-nGnRE
memattrs.memtype =&& s2desc.memattrs.inner.attrs != '10') ||
(!apply_force_writeback && s1desc.memattrs.memtype == MemType_Device;
memattrs.device =) ) then
result.memattrs.memtype = DeviceType_nGnREMemType_Device;
when '00010', '00011', '00100'
// Write-back or Write-through Read allocate, or Non-cacheable
memattrs.memtype = if s1desc.memattrs.memtype == MemType_Normal;
memattrs.inner =then
result.memattrs.device = s2desc.memattrs.device;
elsif s2desc.memattrs.memtype == ShortConvertAttrsHintsMemType_Normal(C:B, acctype, FALSE);
memattrs.outer =then
result.memattrs.device = s1desc.memattrs.device;
else // Both Device
result.memattrs.device = ShortConvertAttrsHintsCombineS1S2Device(C:B, acctype, FALSE);
memattrs.shareable = (S == '1');
when '00110'
memattrs =(s1desc.memattrs.device,
s2desc.memattrs.device);
result.memattrs.tagged = FALSE;
// S1 can be either Normal or Device, S2 is Normal.
else
result.memattrs.memtype = MemoryAttributes IMPLEMENTATION_DEFINED;
when '00111'
// Write-back Read and Write allocate
memattrs.memtype = MemType_Normal;
memattrs.inner = result.memattrs.device = ShortConvertAttrsHintsDeviceType('01', acctype, FALSE);
memattrs.outer =UNKNOWN;
result.memattrs.inner = ShortConvertAttrsHintsCombineS1S2AttrHints('01', acctype, FALSE);
memattrs.shareable = (S == '1');
when '1xxxx'
// Cacheable, TEX<1:0> = Outer attrs, {C,B} = Inner attrs
memattrs.memtype =(s1desc.memattrs.inner, s2desc.memattrs.inner, s2acctype);
result.memattrs.outer = MemType_NormalCombineS1S2AttrHints;
memattrs.inner =(s1desc.memattrs.outer, s2desc.memattrs.outer, s2acctype);
result.memattrs.shareable = (s1desc.memattrs.shareable || s2desc.memattrs.shareable);
result.memattrs.outershareable = (s1desc.memattrs.outershareable ||
s2desc.memattrs.outershareable);
result.memattrs.tagged = (s1desc.memattrs.tagged &&
result.memattrs.inner.attrs == ShortConvertAttrsHintsMemAttr_WB(C:B, acctype, FALSE);
memattrs.outer =&&
result.memattrs.inner.hints == ShortConvertAttrsHintsMemHint_RWA(TEX<1:0>, acctype, FALSE);
memattrs.shareable = (S == '1');
otherwise
// Reserved, handled above&&
result.memattrs.outer.attrs ==
&&
result.memattrs.outer.hints == MemHint_RWAUnreachableMemAttr_WB();
);
// transient bits are not supported in this format
memattrs.inner.transient = FALSE;
memattrs.outer.transient = FALSE;
// distinction between inner and outer shareable is not supported in this format
memattrs.outershareable = memattrs.shareable;
memattrs.tagged = FALSE;
return result.memattrs = MemAttrDefaults(memattrs);(result.memattrs);
return result;
// AArch32.InstructionDevice()
// ===========================
// Instruction fetches from memory marked as Device but not execute-never might generate a
// Permission Fault but are otherwise treated as if from Normal Non-cacheable memory.
// AArch32.DefaultTEXDecode()
// ==========================
AddressDescriptorMemoryAttributes AArch32.InstructionDevice(AArch32.DefaultTEXDecode(bits(3) TEX, bit C, bit B, bit S,AddressDescriptor addrdesc, bits(32) vaddress,
bits(40) ipaddress, integer level, bits(4) domain,
AccType acctype, boolean iswrite, boolean secondstage,
boolean s2fs1walk)
c =acctype) ConstrainUnpredictableMemoryAttributes(memattrs;
// Reserved values map to allocated values
if (TEX == '001' && C:B == '01') || (TEX == '010' && C:B != '00') || TEX == '011' then
bits(5) texcb;
(-, texcb) =Unpredictable_INSTRDEVICEConstrainUnpredictableBits);
assert c IN {(Constraint_NONEUnpredictable_RESTEXCB,);
TEX = texcb<4:2>; C = texcb<1>; B = texcb<0>;
case TEX:C:B of
when '00000'
// Device-nGnRnE
memattrs.memtype = Constraint_FAULTMemType_Device};
if c ==;
memattrs.device = Constraint_FAULTDeviceType_nGnRnE then
addrdesc.fault =;
when '00001', '01000'
// Device-nGnRE
memattrs.memtype = AArch32.PermissionFaultMemType_Device(ipaddress, domain, level, acctype, iswrite,
secondstage, s2fs1walk);
else
addrdesc.memattrs.memtype =;
memattrs.device = DeviceType_nGnRE;
when '00010', '00011', '00100'
// Write-back or Write-through Read allocate, or Non-cacheable
memattrs.memtype = MemType_Normal;
addrdesc.memattrs.inner.attrs = memattrs.inner = MemAttr_NCShortConvertAttrsHints;
addrdesc.memattrs.inner.hints =(C:B, acctype, FALSE);
memattrs.outer = (C:B, acctype, FALSE);
memattrs.shareable = (S == '1');
when '00110'
memattrs = MemoryAttributes IMPLEMENTATION_DEFINED;
when '00111'
// Write-back Read and Write allocate
memattrs.memtype = MemType_Normal;
memattrs.inner = ShortConvertAttrsHints('01', acctype, FALSE);
memattrs.outer = ShortConvertAttrsHints('01', acctype, FALSE);
memattrs.shareable = (S == '1');
when '1xxxx'
// Cacheable, TEX<1:0> = Outer attrs, {C,B} = Inner attrs
memattrs.memtype = MemType_Normal;
memattrs.inner = ShortConvertAttrsHints(C:B, acctype, FALSE);
memattrs.outer = ShortConvertAttrsHints(TEX<1:0>, acctype, FALSE);
memattrs.shareable = (S == '1');
otherwise
// Reserved, handled above
UnreachableMemHint_NoShortConvertAttrsHints;
addrdesc.memattrs.outer = addrdesc.memattrs.inner;
addrdesc.memattrs.tagged = FALSE;
addrdesc.memattrs =();
// transient bits are not supported in this format
memattrs.inner.transient = FALSE;
memattrs.outer.transient = FALSE;
// distinction between inner and outer shareable is not supported in this format
memattrs.outershareable = memattrs.shareable;
memattrs.tagged = FALSE;
return MemAttrDefaults(addrdesc.memattrs);
return addrdesc;(memattrs);
// AArch32.RemappedTEXDecode()
// AArch32.InstructionDevice()
// ===========================
// Instruction fetches from memory marked as Device but not execute-never might generate a
// Permission Fault but are otherwise treated as if from Normal Non-cacheable memory.
MemoryAttributesAddressDescriptor AArch32.RemappedTEXDecode(bits(3) TEX, bit C, bit B, bit S,AArch32.InstructionDevice( AddressDescriptor addrdesc, bits(32) vaddress,
bits(40) ipaddress, integer level, bits(4) domain,
AccType acctype)acctype, boolean iswrite, boolean secondstage,
boolean s2fs1walk)
c =
MemoryAttributesConstrainUnpredictable memattrs;
region =( UIntUnpredictable_INSTRDEVICE(TEX<0>:C:B); // TEX<2:1> are ignored in this mapping scheme
if region == 6 then
memattrs =);
assert c IN { MemoryAttributesConstraint_NONE IMPLEMENTATION_DEFINED;
else
base = 2 * region;
attrfield = PRRR<base+1:base>;
if attrfield == '11' then // Reserved, maps to allocated value
(-, attrfield) =, ConstrainUnpredictableBitsConstraint_FAULT(};
if c ==Unpredictable_RESPRRRConstraint_FAULT);
case attrfield of
when '00' // Device-nGnRnE
memattrs.memtype =then
addrdesc.fault = MemType_DeviceAArch32.PermissionFault;
memattrs.device =(ipaddress, domain, level, acctype, iswrite,
secondstage, s2fs1walk);
else
addrdesc.memattrs.memtype = DeviceType_nGnRnE;
when '01' // Device-nGnRE
memattrs.memtype = MemType_Device;
memattrs.device = DeviceType_nGnRE;
when '10'
memattrs.memtype = MemType_Normal;
memattrs.inner = addrdesc.memattrs.inner.attrs = ShortConvertAttrsHintsMemAttr_NC(NMRR<base+1:base>, acctype, FALSE);
memattrs.outer =;
addrdesc.memattrs.inner.hints = ShortConvertAttrsHintsMemHint_No(NMRR<base+17:base+16>, acctype, FALSE);
s_bit = if S == '0' then PRRR.NS0 else PRRR.NS1;
memattrs.shareable = (s_bit == '1');
memattrs.outershareable = (s_bit == '1' && PRRR<region+24> == '0');
when '11'
Unreachable();
// transient bits are not supported in this format
memattrs.inner.transient = FALSE;
memattrs.outer.transient = FALSE;
memattrs.tagged = FALSE;
return;
addrdesc.memattrs.outer = addrdesc.memattrs.inner;
addrdesc.memattrs.tagged = FALSE;
addrdesc.memattrs = MemAttrDefaults(memattrs);(addrdesc.memattrs);
return addrdesc;
// AArch32.S1AttrDecode()
// ======================
// Converts the Stage 1 attribute fields, using the MAIR, to orthogonal
// attributes and hints.
// AArch32.RemappedTEXDecode()
// ===========================
MemoryAttributes AArch32.S1AttrDecode(bits(2) SH, bits(3) attr,AArch32.RemappedTEXDecode(bits(3) TEX, bit C, bit B, bit S, AccType acctype)
MemoryAttributes memattrs;
if PSTATE.EL == region = EL2 then
mair = HMAIR1:HMAIR0;
else
mair = MAIR1:MAIR0;
index = 8 * UInt(attr);
attrfield = mair<index+7:index>;
memattrs.tagged = FALSE;
if ((attrfield<7:4> != '0000' && attrfield<7:4> != '1111' && attrfield<3:0> == '0000') ||
(attrfield<7:4> == '0000' && attrfield<3:0> != 'xx00')) then
// Reserved, maps to an allocated value
(-, attrfield) =(TEX<0>:C:B); // TEX<2:1> are ignored in this mapping scheme
if region == 6 then
memattrs = MemoryAttributes IMPLEMENTATION_DEFINED;
else
base = 2 * region;
attrfield = PRRR<base+1:base>;
if attrfield == '11' then // Reserved, maps to allocated value
(-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIRUnpredictable_RESPRRR);
if attrfield<7:4> == '0000' then // Device
memattrs.memtype = case attrfield of
when '00' // Device-nGnRnE
memattrs.memtype = MemType_Device;
case attrfield<3:0> of
when '0000' memattrs.device = memattrs.device = DeviceType_nGnRnE;
when '0100' memattrs.device = when '01' // Device-nGnRE
memattrs.memtype = MemType_Device;
memattrs.device = DeviceType_nGnRE;
when '1000' memattrs.device = when '10'
memattrs.memtype = DeviceType_nGRE;
when '1100' memattrs.device = DeviceType_GRE;
otherwise Unreachable(); // Reserved, handled above
elsif attrfield<3:0> != '0000' then // Normal
memattrs.memtype = MemType_Normal;
memattrs.outer = memattrs.inner = LongConvertAttrsHintsShortConvertAttrsHints(attrfield<7:4>, acctype);
memattrs.inner =(NMRR<base+1:base>, acctype, FALSE);
memattrs.outer = LongConvertAttrsHintsShortConvertAttrsHints(attrfield<3:0>, acctype);
memattrs.shareable = SH<1> == '1';
memattrs.outershareable = SH == '10';
else(NMRR<base+17:base+16>, acctype, FALSE);
s_bit = if S == '0' then PRRR.NS0 else PRRR.NS1;
memattrs.shareable = (s_bit == '1');
memattrs.outershareable = (s_bit == '1' && PRRR<region+24> == '0');
when '11'
Unreachable(); // Reserved, handled above
();
// transient bits are not supported in this format
memattrs.inner.transient = FALSE;
memattrs.outer.transient = FALSE;
memattrs.tagged = FALSE;
return MemAttrDefaults(memattrs);
// AArch32.TranslateAddressS1Off()
// ===============================
// Called for stage 1 translations when translation is disabled to supply a default translation.
// Note that there are additional constraints on instruction prefetching that are not described in
// this pseudocode.
// AArch32.S1AttrDecode()
// ======================
// Converts the Stage 1 attribute fields, using the MAIR, to orthogonal
// attributes and hints.
TLBRecordMemoryAttributes AArch32.TranslateAddressS1Off(bits(32) vaddress,AArch32.S1AttrDecode(bits(2) SH, bits(3) attr, AccType acctype, boolean iswrite)
assertacctype) ELUsingAArch32MemoryAttributes(memattrs;
if PSTATE.EL ==S1TranslationRegimeEL2());then
mair = HMAIR1:HMAIR0;
else
mair = MAIR1:MAIR0;
index = 8 *
TLBRecordUInt result;
result.descupdate.AF = FALSE;
result.descupdate.AP = FALSE;
(attr);
attrfield = mair<index+7:index>;
default_cacheable = ( memattrs.tagged = FALSE;
if ((attrfield<7:4> != '0000' && attrfield<7:4> != '1111' && attrfield<3:0> == '0000') ||
(attrfield<7:4> == '0000' && attrfield<3:0> != 'xx00')) then
// Reserved, maps to an allocated value
(-, attrfield) =HasS2TranslationConstrainUnpredictableBits() && ((if( ELUsingAArch32Unpredictable_RESMAIR();
if !EL2HaveMTEExt) then HCR.DC else HCR_EL2.DC) == '1'));
if default_cacheable then
// Use default cacheable settings
result.addrdesc.memattrs.memtype =() && attrfield<7:4> == '1111' && attrfield<3:0> == '0000' then
// Reserved, maps to an allocated value
(-, attrfield) = MemType_NormalConstrainUnpredictableBits;
result.addrdesc.memattrs.inner.attrs =( MemAttr_WBUnpredictable_RESMAIR; // Write-back
result.addrdesc.memattrs.inner.hints =);
if attrfield<7:4> == '0000' then // Device
memattrs.memtype = MemHint_RWA;
result.addrdesc.memattrs.shareable = FALSE;
result.addrdesc.memattrs.outershareable = FALSE;
result.addrdesc.memattrs.tagged = HCR_EL2.DCT == '1';
elsif acctype != AccType_IFETCH then
// Treat data as Device
result.addrdesc.memattrs.memtype = MemType_Device;
result.addrdesc.memattrs.device = case attrfield<3:0> of
when '0000' memattrs.device = DeviceType_nGnRnE;
result.addrdesc.memattrs.inner = when '0100' memattrs.device = MemAttrHintsDeviceType_nGnRE UNKNOWN;
result.addrdesc.memattrs.tagged = FALSE;
else
// Instruction cacheability controlled by SCTLR/HSCTLR.I
if PSTATE.EL ==;
when '1000' memattrs.device = EL2DeviceType_nGRE then
cacheable = HSCTLR.I == '1';
else
cacheable = SCTLR.I == '1';
result.addrdesc.memattrs.memtype =;
when '1100' memattrs.device = DeviceType_GRE;
otherwise Unreachable(); // Reserved, handled above
elsif attrfield<3:0> != '0000' then // Normal
memattrs.memtype = MemType_Normal;
if cacheable then
result.addrdesc.memattrs.inner.attrs = memattrs.outer = MemAttr_WTLongConvertAttrsHints;
result.addrdesc.memattrs.inner.hints =(attrfield<7:4>, acctype);
memattrs.inner = MemHint_RALongConvertAttrsHints;
else
result.addrdesc.memattrs.inner.attrs =(attrfield<3:0>, acctype);
memattrs.shareable = SH<1> == '1';
memattrs.outershareable = SH == '10';
elsif MemAttr_NCHaveMTEExt;
result.addrdesc.memattrs.inner.hints =() && attrfield == '11110000' then // Normal, Tagged WB-RWA
memattrs.memtype = MemHint_NoMemType_Normal;
result.addrdesc.memattrs.shareable = TRUE;
result.addrdesc.memattrs.outershareable = TRUE;
result.addrdesc.memattrs.tagged = FALSE;
result.addrdesc.memattrs.outer = result.addrdesc.memattrs.inner;
result.addrdesc.memattrs = memattrs.outer = MemAttrDefaultsLongConvertAttrsHints(result.addrdesc.memattrs);
result.perms.ap = bits(3) UNKNOWN;
result.perms.xn = '0';
result.perms.pxn = '0';
result.nG = bit UNKNOWN;
result.contiguous = boolean UNKNOWN;
result.domain = bits(4) UNKNOWN;
result.level = integer UNKNOWN;
result.blocksize = integer UNKNOWN;
result.addrdesc.paddress.address =('1111', acctype); // WB_RWA
memattrs.inner = ZeroExtendLongConvertAttrsHints(vaddress);
result.addrdesc.paddress.NS = if('1111', acctype); // WB_RWA
memattrs.shareable = SH<1> == '1';
memattrs.outershareable = SH == '10';
memattrs.tagged = TRUE;
else IsSecureUnreachable() then '0' else '1';
result.addrdesc.fault =(); // Reserved, handled above
return AArch32.NoFaultMemAttrDefaults();
result.descupdate.descaddr = result.addrdesc;
return result;(memattrs);
// AArch32.AccessUsesEL()
// ======================
// Returns the Exception Level of the regime that will manage the translation for a given access type.
// AArch32.TranslateAddressS1Off()
// ===============================
// Called for stage 1 translations when translation is disabled to supply a default translation.
// Note that there are additional constraints on instruction prefetching that are not described in
// this pseudocode.
bits(2)TLBRecord AArch32.AccessUsesEL(AArch32.TranslateAddressS1Off(bits(32) vaddress,AccType acctype)
if acctype ==acctype, boolean iswrite)
assert AccType_UNPRIVELUsingAArch32 then
return( ());
TLBRecord result;
result.descupdate.AF = FALSE;
result.descupdate.AP = FALSE;
default_cacheable = (HasS2Translation() && ((if ELUsingAArch32(EL2) then HCR.DC else HCR_EL2.DC) == '1'));
if default_cacheable then
// Use default cacheable settings
result.addrdesc.memattrs.memtype = MemType_Normal;
result.addrdesc.memattrs.inner.attrs = MemAttr_WB; // Write-back
result.addrdesc.memattrs.inner.hints = MemHint_RWA;
result.addrdesc.memattrs.shareable = FALSE;
result.addrdesc.memattrs.outershareable = FALSE;
result.addrdesc.memattrs.tagged = HCR_EL2.DCT == '1';
elsif acctype != AccType_IFETCH then
// Treat data as Device
result.addrdesc.memattrs.memtype = MemType_Device;
result.addrdesc.memattrs.device = DeviceType_nGnRnE;
result.addrdesc.memattrs.inner = MemAttrHints UNKNOWN;
result.addrdesc.memattrs.tagged = FALSE;
else
// Instruction cacheability controlled by SCTLR/HSCTLR.I
if PSTATE.EL == EL2 then
cacheable = HSCTLR.I == '1';
else
cacheable = SCTLR.I == '1';
result.addrdesc.memattrs.memtype = MemType_Normal;
if cacheable then
result.addrdesc.memattrs.inner.attrs = MemAttr_WT;
result.addrdesc.memattrs.inner.hints = MemHint_RA;
else
result.addrdesc.memattrs.inner.attrs = MemAttr_NC;
result.addrdesc.memattrs.inner.hints = MemHint_No;
result.addrdesc.memattrs.shareable = TRUE;
result.addrdesc.memattrs.outershareable = TRUE;
result.addrdesc.memattrs.tagged = FALSE;
result.addrdesc.memattrs.outer = result.addrdesc.memattrs.inner;
result.addrdesc.memattrs = MemAttrDefaults(result.addrdesc.memattrs);
result.perms.ap = bits(3) UNKNOWN;
result.perms.xn = '0';
result.perms.pxn = '0';
result.nG = bit UNKNOWN;
result.contiguous = boolean UNKNOWN;
result.domain = bits(4) UNKNOWN;
result.level = integer UNKNOWN;
result.blocksize = integer UNKNOWN;
result.addrdesc.paddress.address = ZeroExtend(vaddress);
result.addrdesc.paddress.NS = if IsSecure() then '0' else '1';
result.addrdesc.fault = AArch32.NoFaultEL0S1TranslationRegime;
else
return PSTATE.EL;();
result.descupdate.descaddr = result.addrdesc;
return result;
// AArch32.CheckDomain()
// =====================
// AArch32.AccessUsesEL()
// ======================
// Returns the Exception Level of the regime that will manage the translation for a given access type.
(boolean, FaultRecord)bits(2) AArch32.CheckDomain(bits(4) domain, bits(32) vaddress, integer level,AArch32.AccessUsesEL(
AccType acctype, boolean iswrite)
index = 2 *acctype)
if acctype == UIntAccType_UNPRIV(domain);
attrfield = DACR<index+1:index>;
if attrfield == '10' then // Reserved, maps to an allocated value
// Reserved value maps to an allocated value
(-, attrfield) =then
return ConstrainUnpredictableBitsEL0(Unpredictable_RESDACR);
if attrfield == '00' then
fault = AArch32.DomainFault(domain, level, acctype, iswrite);
else
fault = AArch32.NoFault();
permissioncheck = (attrfield == '01');
return (permissioncheck, fault);;
else
return PSTATE.EL;
// AArch32.CheckPermission()
// =========================
// Function used for permission checking from AArch32 stage 1 translations
// AArch32.CheckDomain()
// =====================
FaultRecord(boolean, FaultRecord) AArch32.CheckPermission(AArch32.CheckDomain(bits(4) domain, bits(32) vaddress, integer level,Permissions perms, bits(32) vaddress, integer level,
bits(4) domain, bit NS, AccType acctype,
boolean iswrite)
assertacctype, boolean iswrite)
index = 2 * ELUsingAArch32UInt((domain);
attrfield = DACR<index+1:index>;
if attrfield == '10' then // Reserved, maps to an allocated value
// Reserved value maps to an allocated value
(-, attrfield) =S1TranslationRegimeConstrainUnpredictableBits());
if PSTATE.EL !=( EL2Unpredictable_RESDACR then
wxn = SCTLR.WXN == '1';
if TTBCR.EAE == '1' || SCTLR.AFE == '1' || perms.ap<0> == '1' then
priv_r = TRUE;
priv_w = perms.ap<2> == '0';
user_r = perms.ap<1> == '1';
user_w = perms.ap<2:1> == '01';
else
priv_r = perms.ap<2:1> != '00';
priv_w = perms.ap<2:1> == '01';
user_r = perms.ap<1> == '1';
user_w = FALSE;
uwxn = SCTLR.UWXN == '1';
);
ispriv = if attrfield == '00' then
fault = AArch32.AccessUsesELAArch32.DomainFault(acctype) != EL0;
user_xn = !user_r || perms.xn == '1' || (user_w && wxn);
priv_xn = (!priv_r || perms.xn == '1' || perms.pxn == '1' ||
(priv_w && wxn) || (user_w && uwxn));
pan = if HavePANExt() then PSTATE.PAN else '0';
is_ldst = !(acctype IN {AccType_DC, AccType_AT,
AccType_ATPAN, AccType_IFETCH});
is_ats1xp = acctype == AccType_ATPAN;
if pan == '1' && user_r && ispriv && (is_ldst || is_ats1xp) then
priv_r = FALSE;
priv_w = FALSE;
if ispriv then
(r, w, xn) = (priv_r, priv_w, priv_xn);
else
(r, w, xn) = (user_r, user_w, user_xn);
else
// Access from EL2
wxn = HSCTLR.WXN == '1';
r = TRUE;
w = perms.ap<2> == '0';
xn = perms.xn == '1' || (w && wxn);
// Restriction on Secure instruction fetch
if HaveEL(EL3) && IsSecure() && NS == '1' then
secure_instr_fetch = if ELUsingAArch32(EL3) then SCR.SIF else SCR_EL3.SIF;
if secure_instr_fetch == '1' then xn = TRUE;
if acctype == AccType_IFETCH then
fail = xn;
elsif acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW } then
fail = !r || !w; // Report as a read failure if a read of the location would fail.
if fail then iswrite = r;
elsif acctype IN {AccType_IC, AccType_DC} then
// AArch32 IC/DC maintenance instructions operating by VA cannot fault.
fail = FALSE;
elsif iswrite then
fail = !w;
else
fail = !r;
if fail then
secondstage = FALSE;
s2fs1walk = FALSE;
ipaddress = bits(40) UNKNOWN;
return AArch32.PermissionFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
(domain, level, acctype, iswrite);
else
return fault = AArch32.NoFault();();
permissioncheck = (attrfield == '01');
return (permissioncheck, fault);
// AArch32.CheckS2Permission()
// ===========================
// Function used for permission checking from AArch32 stage 2 translations
// AArch32.CheckPermission()
// =========================
// Function used for permission checking from AArch32 stage 1 translations
FaultRecord AArch32.CheckS2Permission(AArch32.CheckPermission(Permissions perms, bits(32) vaddress, bits(40) ipaddress,
integer level,perms, bits(32) vaddress, integer level,
bits(4) domain, bit NS, AccType acctype, boolean iswrite,
boolean s2fs1walk)
acctype,
boolean iswrite)
assert HaveELELUsingAArch32(S1TranslationRegime());
if PSTATE.EL != EL2) && !then
wxn = SCTLR.WXN == '1';
if TTBCR.EAE == '1' || SCTLR.AFE == '1' || perms.ap<0> == '1' then
priv_r = TRUE;
priv_w = perms.ap<2> == '0';
user_r = perms.ap<1> == '1';
user_w = perms.ap<2:1> == '01';
else
priv_r = perms.ap<2:1> != '00';
priv_w = perms.ap<2:1> == '01';
user_r = perms.ap<1> == '1';
user_w = FALSE;
uwxn = SCTLR.UWXN == '1';
ispriv =IsSecureAArch32.AccessUsesEL() &&(acctype) != ELUsingAArch32EL0(;
user_xn = !user_r || perms.xn == '1' || (user_w && wxn);
priv_xn = (!priv_r || perms.xn == '1' || perms.pxn == '1' ||
(priv_w && wxn) || (user_w && uwxn));
pan = ifEL2HavePANExt) &&() then PSTATE.PAN else '0';
is_ldst = !(acctype IN { HasS2TranslationAccType_DC();
r = perms.ap<1> == '1';
w = perms.ap<2> == '1';
if, HaveExtendedExecuteNeverExtAccType_DC_UNPRIV() then
case perms.xn:perms.xxn of
when '00' xn = !r;
when '01' xn = !r || PSTATE.EL ==, EL1AccType_AT;
when '10' xn = TRUE;
when '11' xn = !r || PSTATE.EL ==, EL0AccType_IFETCH;
else
xn = !r || perms.xn == '1';
// Stage 1 walk is checked as a read, regardless of the original type
if acctype ==});
is_ats1xp = (acctype == AccType_AT && AArch32.ExecutingATS1xPInstr());
if pan == '1' && user_r && ispriv && (is_ldst || is_ats1xp) then
priv_r = FALSE;
priv_w = FALSE;
if ispriv then
(r, w, xn) = (priv_r, priv_w, priv_xn);
else
(r, w, xn) = (user_r, user_w, user_xn);
else
// Access from EL2
wxn = HSCTLR.WXN == '1';
r = TRUE;
w = perms.ap<2> == '0';
xn = perms.xn == '1' || (w && wxn);
// Restriction on Secure instruction fetch
if HaveEL(EL3) && IsSecure() && NS == '1' then
secure_instr_fetch = if ELUsingAArch32(EL3) then SCR.SIF else SCR_EL3.SIF;
if secure_instr_fetch == '1' then xn = TRUE;
if acctype == AccType_IFETCH && !s2fs1walk then
then
fail = xn;
elsif (acctype IN { failedread = TRUE;
elsif acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW }) && !s2fs1walk then
fail = !r || !w; // Report as a read failure if a read of the location would fail.
if fail then iswrite = r;
elsif acctype IN {AccType_IC,} then
fail = !r || !w;
failedread = !r;
elsif acctype == AccType_DC} && !s2fs1walk then
// AArch32 IC/DC maintenance instructions operating by VA cannot fault.
then
// DC maintenance instructions operating by VA, cannot fault from stage 1 translation.
fail = FALSE;
elsif iswrite && !s2fs1walk then
elsif iswrite then
fail = !w;
failedread = FALSE;
else
fail = !r;
failedread = TRUE;
if fail then
domain = bits(4) UNKNOWN;
secondstage = TRUE;
secondstage = FALSE;
s2fs1walk = FALSE;
ipaddress = bits(40) UNKNOWN;
return AArch32.PermissionFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
!failedread, secondstage, s2fs1walk);
else
return AArch32.NoFault();
// AArch32.CheckBreakpoint()
// =========================
// Called before executing the instruction of length "size" bytes at "vaddress" in an AArch32
// translation regime, when either debug exceptions are enabled, or halting debug is enabled
// and halting is allowed.
// AArch32.CheckS2Permission()
// ===========================
// Function used for permission checking from AArch32 stage 2 translations
FaultRecord AArch32.CheckBreakpoint(bits(32) vaddress, integer size)
assertAArch32.CheckS2Permission( Permissions perms, bits(32) vaddress, bits(40) ipaddress,
integer level, AccType acctype, boolean iswrite,
boolean s2fs1walk)
assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(S1TranslationRegimeEL2());
assert size IN {2,4};
match = FALSE;
mismatch = FALSE;
for i = 0 to) && GetNumBreakpointsHasS2Translation() - 1
(match_i, mismatch_i) =();
r = perms.ap<1> == '1';
w = perms.ap<2> == '1';
if AArch32.BreakpointMatchHaveExtendedExecuteNeverExt(i, vaddress, size);
match = match || match_i;
mismatch = mismatch || mismatch_i;
if match &&() then
case perms.xn:perms.xxn of
when '00' xn = !r;
when '01' xn = !r || PSTATE.EL == HaltOnBreakpointOrWatchpointEL1() then
reason =;
when '10' xn = TRUE;
when '11' xn = !r || PSTATE.EL == DebugHalt_BreakpointEL0;;
else
xn = !r || perms.xn == '1';
// Stage 1 walk is checked as a read, regardless of the original type
if acctype ==
Halt(reason);
elsif (match || mismatch) then
acctype = AccType_IFETCH;
iswrite = FALSE;
debugmoe =&& !s2fs1walk then
fail = xn;
failedread = TRUE;
elsif (acctype IN { DebugException_BreakpointAccType_ATOMICRW;
return, , AccType_ORDEREDATOMICRW }) && !s2fs1walk then
fail = !r || !w;
failedread = !r;
elsif acctype == AccType_DC && !s2fs1walk then
// DC maintenance instructions operating by VA, do not generate Permission faults
// from stage 2 translation, other than from stage 1 translation table walk.
fail = FALSE;
elsif iswrite && !s2fs1walk then
fail = !w;
failedread = FALSE;
else
fail = !r;
failedread = !iswrite;
if fail then
domain = bits(4) UNKNOWN;
secondstage = TRUE;
return AArch32.PermissionFaultAArch32.DebugFaultAccType_ORDEREDRW(acctype, iswrite, debugmoe);
(ipaddress, domain, level, acctype,
!failedread, secondstage, s2fs1walk);
else
return AArch32.NoFault();
// AArch32.CheckDebug()
// ====================
// Called on each access to check for a debug exception or entry to Debug state.
// AArch32.CheckBreakpoint()
// =========================
// Called before executing the instruction of length "size" bytes at "vaddress" in an AArch32
// translation regime, when either debug exceptions are enabled, or halting debug is enabled
// and halting is allowed.
FaultRecord AArch32.CheckDebug(bits(32) vaddress,AArch32.CheckBreakpoint(bits(32) vaddress, integer size)
assert AccTypeELUsingAArch32 acctype, boolean iswrite, integer size)(
FaultRecordS1TranslationRegime fault =());
assert size IN {2,4};
match = FALSE;
mismatch = FALSE;
for i = 0 to AArch32.NoFaultUInt();
d_side = (acctype !=(DBGDIDR.BRPs)
(match_i, mismatch_i) = AccType_IFETCHAArch32.BreakpointMatch);
generate_exception =(i, vaddress, size);
match = match || match_i;
mismatch = mismatch || mismatch_i;
if match && AArch32.GenerateDebugExceptions() && DBGDSCRext.MDBGen == '1';
halt = HaltOnBreakpointOrWatchpoint();
// Relative priority of Vector Catch and Breakpoint exceptions not defined in the architecture
vector_catch_first =() then
reason = ConstrainUnpredictableBoolDebugHalt_Breakpoint(;Unpredictable_BPVECTORCATCHPRIHalt);
if !d_side && vector_catch_first && generate_exception then
fault =(reason);
elsif (match || mismatch) then
acctype = AArch32.CheckVectorCatchAccType_IFETCH(vaddress, size);
if fault.statuscode ==;
iswrite = FALSE;
debugmoe = Fault_NoneDebugException_Breakpoint && (generate_exception || halt) then
if d_side then
fault =;
return AArch32.CheckWatchpointAArch32.DebugFault(vaddress, acctype, iswrite, size);
else
fault =(acctype, iswrite, debugmoe);
else
return AArch32.CheckBreakpointAArch32.NoFault(vaddress, size);
if fault.statuscode == Fault_None && !d_side && !vector_catch_first && generate_exception then
return AArch32.CheckVectorCatch(vaddress, size);
return fault;();
// AArch32.CheckVectorCatch()
// ==========================
// Called before executing the instruction of length "size" bytes at "vaddress" in an AArch32
// translation regime, when debug exceptions are enabled.
// AArch32.CheckDebug()
// ====================
// Called on each access to check for a debug exception or entry to Debug state.
FaultRecord AArch32.CheckVectorCatch(bits(32) vaddress, integer size)
assertAArch32.CheckDebug(bits(32) vaddress, ELUsingAArch32AccType(acctype, boolean iswrite, integer size)S1TranslationRegimeFaultRecord());
match =fault = AArch32.VCRMatchAArch32.NoFault(vaddress);
if size == 4 && !match &&();
d_side = (acctype != AArch32.VCRMatchAccType_IFETCH(vaddress + 2) then
match =);
generate_exception = AArch32.GenerateDebugExceptions() && DBGDSCRext.MDBGen == '1';
halt = HaltOnBreakpointOrWatchpoint();
// Relative priority of Vector Catch and Breakpoint exceptions not defined in the architecture
vector_catch_first = ConstrainUnpredictableBool(Unpredictable_VCMATCHHALFUnpredictable_BPVECTORCATCHPRI);
if match then
acctype = if !d_side && vector_catch_first && generate_exception then
fault = AccType_IFETCHAArch32.CheckVectorCatch;
iswrite = FALSE;
debugmoe =(vaddress, size);
if fault.statuscode == DebugException_VectorCatchFault_None;
return&& (generate_exception || halt) then
if d_side then
fault = AArch32.DebugFaultAArch32.CheckWatchpoint(acctype, iswrite, debugmoe);
else
return(vaddress, acctype, iswrite, size);
else
fault = (vaddress, size);
if fault.statuscode == Fault_None && !d_side && !vector_catch_first && generate_exception then
return AArch32.CheckVectorCatchAArch32.NoFaultAArch32.CheckBreakpoint();(vaddress, size);
return fault;
// AArch32.CheckWatchpoint()
// =========================
// Called before accessing the memory location of "size" bytes at "address",
// when either debug exceptions are enabled for the access, or halting debug
// is enabled and halting is allowed.
// AArch32.CheckVectorCatch()
// ==========================
// Called before executing the instruction of length "size" bytes at "vaddress" in an AArch32
// translation regime, when debug exceptions are enabled.
FaultRecord AArch32.CheckWatchpoint(bits(32) vaddress,AArch32.CheckVectorCatch(bits(32) vaddress, integer size)
assert AccType acctype,
boolean iswrite, integer size)
assert ELUsingAArch32(S1TranslationRegime());
if acctype IN {
match =AccType_TTWAArch32.VCRMatch,(vaddress);
if size == 4 && !match && AccType_ICAArch32.VCRMatch,(vaddress + 2) then
match = AccType_ATConstrainUnpredictableBool,( AccType_ATPANUnpredictable_VCMATCHHALF} then
return);
if match then
acctype = AArch32.NoFaultAccType_IFETCH();
if acctype ==;
iswrite = FALSE;
debugmoe = AccType_DCDebugException_VectorCatch then
if !iswrite then
return AArch32.NoFault();
elsif !(boolean IMPLEMENTATION_DEFINED "DCIMVAC generates watchpoint") then
return AArch32.NoFault();
match = FALSE;
ispriv = AArch32.AccessUsesEL(acctype) != EL0;
for i = 0 to GetNumWatchpoints() - 1
if AArch32.WatchpointMatch(i, vaddress, size, ispriv, acctype, iswrite) then
match = TRUE;
if match && HaltOnBreakpointOrWatchpoint() then
reason = DebugHalt_Watchpoint;
EDWAR = vaddress;
Halt(reason);
elsif match then
debugmoe = DebugException_Watchpoint;
return AArch32.DebugFault(acctype, iswrite, debugmoe);
else
return AArch32.NoFault();
// AArch32.AccessFlagFault()
// AArch32.CheckWatchpoint()
// =========================
// Called before accessing the memory location of "size" bytes at "address",
// when either debug exceptions are enabled for the access, or halting debug
// is enabled and halting is allowed.
FaultRecord AArch32.AccessFlagFault(bits(40) ipaddress, bits(4) domain, integer level,AArch32.CheckWatchpoint(bits(32) vaddress,
AccType acctype, boolean iswrite, boolean secondstage,
boolean s2fs1walk)
extflag = bit UNKNOWN;
debugmoe = bits(4) UNKNOWN;
errortype = bits(2) UNKNOWN;
returnacctype,
boolean iswrite, integer size)
assert AArch32.CreateFaultRecordELUsingAArch32(());
if acctype IN {AccType_TTW, AccType_IC, AccType_AT} then
return AArch32.NoFault();
if acctype == AccType_DC then
if !iswrite then
return AArch32.NoFault();
elsif !(boolean IMPLEMENTATION_DEFINED "DCIMVAC generates watchpoint") then
return AArch32.NoFault();
match = FALSE;
ispriv = AArch32.AccessUsesEL(acctype) != EL0;
for i = 0 to UInt(DBGDIDR.WRPs)
match = match || AArch32.WatchpointMatch(i, vaddress, size, ispriv, acctype, iswrite);
if match && HaltOnBreakpointOrWatchpoint() then
reason = DebugHalt_Watchpoint;
EDWAR = vaddress;
Halt(reason);
elsif match then
debugmoe = DebugException_Watchpoint;
return AArch32.DebugFault(acctype, iswrite, debugmoe);
else
return AArch32.NoFaultFault_AccessFlagS1TranslationRegime, ipaddress, domain, level, acctype, iswrite,
extflag, debugmoe, errortype, secondstage, s2fs1walk);();
// AArch32.AddressSizeFault()
// ==========================
// AArch32.AccessFlagFault()
// =========================
FaultRecord AArch32.AddressSizeFault(bits(40) ipaddress, bits(4) domain, integer level,AArch32.AccessFlagFault(bits(40) ipaddress, bits(4) domain, integer level,
AccType acctype, boolean iswrite, boolean secondstage,
boolean s2fs1walk)
extflag = bit UNKNOWN;
debugmoe = bits(4) UNKNOWN;
errortype = bits(2) UNKNOWN;
return AArch32.CreateFaultRecord(Fault_AddressSizeFault_AccessFlag, ipaddress, domain, level, acctype, iswrite,
extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.AlignmentFault()
// ========================
// AArch32.AddressSizeFault()
// ==========================
FaultRecord AArch32.AlignmentFault(AArch32.AddressSizeFault(bits(40) ipaddress, bits(4) domain, integer level,AccType acctype, boolean iswrite, boolean secondstage)
acctype, boolean iswrite, boolean secondstage,
boolean s2fs1walk)
ipaddress = bits(40) UNKNOWN;
domain = bits(4) UNKNOWN;
level = integer UNKNOWN;
extflag = bit UNKNOWN;
debugmoe = bits(4) UNKNOWN;
errortype = bits(2) UNKNOWN;
s2fs1walk = boolean UNKNOWN;
return AArch32.CreateFaultRecord(Fault_AlignmentFault_AddressSize, ipaddress, domain, level, acctype, iswrite,
extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.AsynchExternalAbort()
// =============================
// Wrapper function for asynchronous external aborts
// AArch32.AlignmentFault()
// ========================
FaultRecord AArch32.AsynchExternalAbort(boolean parity, bits(2) errortype, bit extflag)
faulttype = if parity thenAArch32.AlignmentFault( Fault_AsyncParityAccType elseacctype, boolean iswrite, boolean secondstage)
ipaddress = bits(40) UNKNOWN;
domain = bits(4) UNKNOWN;
level = integer UNKNOWN;
extflag = bit UNKNOWN;
debugmoe = bits(4) UNKNOWN;
errortype = bits(2) UNKNOWN;
s2fs1walk = boolean UNKNOWN;
return Fault_AsyncExternalAArch32.CreateFaultRecord;
ipaddress = bits(40) UNKNOWN;
domain = bits(4) UNKNOWN;
level = integer UNKNOWN;
acctype =( AccType_NORMALFault_Alignment;
iswrite = boolean UNKNOWN;
debugmoe = bits(4) UNKNOWN;
secondstage = FALSE;
s2fs1walk = FALSE;
return AArch32.CreateFaultRecord(faulttype, ipaddress, domain, level, acctype, iswrite, extflag,
debugmoe, errortype, secondstage, s2fs1walk);, ipaddress, domain, level, acctype, iswrite,
extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.DebugFault()
// ====================
// AArch32.AsynchExternalAbort()
// =============================
// Wrapper function for asynchronous external aborts
FaultRecord AArch32.DebugFault(AArch32.AsynchExternalAbort(boolean parity, bits(2) errortype, bit extflag)
faulttype = if parity thenAccTypeFault_AsyncParity acctype, boolean iswrite, bits(4) debugmoe)
ipaddress = bits(40) UNKNOWN;
domain = bits(4) UNKNOWN;
errortype = bits(2) UNKNOWN;
level = integer UNKNOWN;
extflag = bit UNKNOWN;
secondstage = FALSE;
s2fs1walk = FALSE;
returnelse Fault_AsyncExternal;
ipaddress = bits(40) UNKNOWN;
domain = bits(4) UNKNOWN;
level = integer UNKNOWN;
acctype = AccType_NORMAL;
iswrite = boolean UNKNOWN;
debugmoe = bits(4) UNKNOWN;
secondstage = FALSE;
s2fs1walk = FALSE;
return AArch32.CreateFaultRecord(Fault_Debug, ipaddress, domain, level, acctype, iswrite,
extflag, debugmoe, errortype, secondstage, s2fs1walk);(faulttype, ipaddress, domain, level, acctype, iswrite, extflag,
debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.DomainFault()
// =====================
// AArch32.DebugFault()
// ====================
FaultRecord AArch32.DomainFault(bits(4) domain, integer level,AArch32.DebugFault( AccType acctype, boolean iswrite)
acctype, boolean iswrite, bits(4) debugmoe)
ipaddress = bits(40) UNKNOWN;
domain = bits(4) UNKNOWN;
errortype = bits(2) UNKNOWN;
level = integer UNKNOWN;
extflag = bit UNKNOWN;
debugmoe = bits(4) UNKNOWN;
errortype = bits(2) UNKNOWN;
secondstage = FALSE;
s2fs1walk = FALSE;
return AArch32.CreateFaultRecord(Fault_DomainFault_Debug, ipaddress, domain, level, acctype, iswrite,
extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.NoFault()
// =================
// AArch32.DomainFault()
// =====================
FaultRecord AArch32.NoFault()
ipaddress = bits(40) UNKNOWN;
domain = bits(4) UNKNOWN;
level = integer UNKNOWN;
acctype =AArch32.DomainFault(bits(4) domain, integer level, AccType_NORMALAccType;
iswrite = boolean UNKNOWN;
acctype, boolean iswrite)
ipaddress = bits(40) UNKNOWN;
extflag = bit UNKNOWN;
debugmoe = bits(4) UNKNOWN;
errortype = bits(2) UNKNOWN;
secondstage = FALSE;
s2fs1walk = FALSE;
return AArch32.CreateFaultRecord(Fault_NoneFault_Domain, ipaddress, domain, level, acctype, iswrite,
extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.PermissionFault()
// =========================
// AArch32.NoFault()
// =================
FaultRecord AArch32.PermissionFault(bits(40) ipaddress, bits(4) domain, integer level,AArch32.NoFault()
ipaddress = bits(40) UNKNOWN;
domain = bits(4) UNKNOWN;
level = integer UNKNOWN;
acctype =
AccTypeAccType_NORMAL acctype, boolean iswrite, boolean secondstage,
boolean s2fs1walk)
;
iswrite = boolean UNKNOWN;
extflag = bit UNKNOWN;
debugmoe = bits(4) UNKNOWN;
errortype = bits(2) UNKNOWN;
secondstage = FALSE;
s2fs1walk = FALSE;
return AArch32.CreateFaultRecord(Fault_PermissionFault_None, ipaddress, domain, level, acctype, iswrite,
extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.TranslationFault()
// ==========================
// AArch32.PermissionFault()
// =========================
FaultRecord AArch32.TranslationFault(bits(40) ipaddress, bits(4) domain, integer level,AArch32.PermissionFault(bits(40) ipaddress, bits(4) domain, integer level,
AccType acctype, boolean iswrite, boolean secondstage,
boolean s2fs1walk)
extflag = bit UNKNOWN;
debugmoe = bits(4) UNKNOWN;
errortype = bits(2) UNKNOWN;
return AArch32.CreateFaultRecord(Fault_TranslationFault_Permission, ipaddress, domain, level, acctype, iswrite,
extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.FirstStageTranslate()
// =============================
// Perform a stage 1 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.
// AArch32.TranslationFault()
// ==========================
AddressDescriptorFaultRecord AArch32.FirstStageTranslate(bits(32) vaddress,AArch32.TranslationFault(bits(40) ipaddress, bits(4) domain, integer level, AccType acctype, boolean iswrite,
boolean wasaligned, integer size)
acctype, boolean iswrite, boolean secondstage,
boolean s2fs1walk)
if PSTATE.EL == extflag = bit UNKNOWN;
debugmoe = bits(4) UNKNOWN;
errortype = bits(2) UNKNOWN;
return EL2AArch32.CreateFaultRecord then
s1_enabled = HSCTLR.M == '1';
elsif( EL2EnabledFault_Translation() then
tge = (if ELUsingAArch32(EL2) then HCR.TGE else HCR_EL2.TGE);
dc = (if ELUsingAArch32(EL2) then HCR.DC else HCR_EL2.DC);
s1_enabled = tge == '0' && dc == '0' && SCTLR.M == '1';
else
s1_enabled = SCTLR.M == '1';
TLBRecord S1;
S1.addrdesc.fault = AArch32.NoFault();
ipaddress = bits(40) UNKNOWN;
secondstage = FALSE;
s2fs1walk = FALSE;
permissioncheck = TRUE;
if s1_enabled then // First stage enabled
use_long_descriptor_format = PSTATE.EL == EL2 || TTBCR.EAE == '1';
if use_long_descriptor_format then
S1 = AArch32.TranslationTableWalkLD(ipaddress, vaddress, acctype, iswrite, secondstage,
s2fs1walk, size);
permissioncheck = TRUE; domaincheck = FALSE;
else
S1 = AArch32.TranslationTableWalkSD(vaddress, acctype, iswrite, size);
permissioncheck = TRUE; domaincheck = TRUE;
else
S1 = AArch32.TranslateAddressS1Off(vaddress, acctype, iswrite);
permissioncheck = FALSE; domaincheck = FALSE;
SetInGuardedPage(FALSE); // No memory is guarded when stage 1 address translation is disabled
if !IsFault(S1.addrdesc) && UsingAArch32() && HaveTrapLoadStoreMultipleDeviceExt() && acctype == AccType_A32LSMD then
if S1.addrdesc.memattrs.memtype == MemType_Device && S1.addrdesc.memattrs.device != DeviceType_GRE then
nTLSMD = if S1TranslationRegime() == EL2 then HSCTLR.nTLSMD else SCTLR.nTLSMD;
if nTLSMD == '0' then
S1.addrdesc.fault = AArch32.AlignmentFault(acctype, iswrite, secondstage);
// Check for unaligned data accesses to Device memory
if (((!wasaligned && acctype != AccType_IFETCH) || acctype == AccType_DCZVA) &&
!IsFault(S1.addrdesc) && S1.addrdesc.memattrs.memtype == MemType_Device) then
S1.addrdesc.fault = AArch32.AlignmentFault(acctype, iswrite, secondstage);
if !IsFault(S1.addrdesc) && domaincheck && !(acctype IN {AccType_DC, AccType_IC}) then
(permissioncheck, abort) = AArch32.CheckDomain(S1.domain, vaddress, S1.level, acctype,
iswrite);
S1.addrdesc.fault = abort;
if !IsFault(S1.addrdesc) && permissioncheck then
S1.addrdesc.fault = AArch32.CheckPermission(S1.perms, vaddress, S1.level,
S1.domain, S1.addrdesc.paddress.NS,
acctype,
iswrite);
// Check for instruction fetches from Device memory not marked as execute-never. If there has
// not been a Permission Fault then the memory is not marked execute-never.
if (!IsFault(S1.addrdesc) && S1.addrdesc.memattrs.memtype == MemType_Device &&
acctype == AccType_IFETCH) then
S1.addrdesc = AArch32.InstructionDevice(S1.addrdesc, vaddress, ipaddress, S1.level,
S1.domain, acctype, iswrite,
secondstage, s2fs1walk);
return S1.addrdesc;, ipaddress, domain, level, acctype, iswrite,
extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.FullTranslate()
// =======================
// Perform both stage 1 and stage 2 translation walks for the current translation regime. The
// function used by Address Translation operations is similar except it uses the translation
// regime specified for the instruction.
// AArch32.FirstStageTranslate()
// =============================
// Perform a stage 1 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.
AddressDescriptor AArch32.FullTranslate(bits(32) vaddress,AArch32.FirstStageTranslate(bits(32) vaddress, AccType acctype, boolean iswrite,
boolean wasaligned, integer size)
boolean wasaligned, integer size)
// First Stage Translation
S1 = if PSTATE.EL == AArch32.FirstStageTranslateEL2(vaddress, acctype, iswrite, wasaligned, size);
if !then
s1_enabled = HSCTLR.M == '1';
elsifEL2Enabled() then
tge = (if ELUsingAArch32(EL2) then HCR.TGE else HCR_EL2.TGE);
dc = (if ELUsingAArch32(EL2) then HCR.DC else HCR_EL2.DC);
s1_enabled = tge == '0' && dc == '0' && SCTLR.M == '1';
else
s1_enabled = SCTLR.M == '1';
TLBRecord S1;
S1.addrdesc.fault = AArch32.NoFault();
ipaddress = bits(40) UNKNOWN;
secondstage = FALSE;
s2fs1walk = FALSE;
if s1_enabled then // First stage enabled
use_long_descriptor_format = PSTATE.EL == EL2 || TTBCR.EAE == '1';
if use_long_descriptor_format then
S1 = AArch32.TranslationTableWalkLD(ipaddress, vaddress, acctype, iswrite, secondstage,
s2fs1walk, size);
permissioncheck = TRUE; domaincheck = FALSE;
else
S1 = AArch32.TranslationTableWalkSD(vaddress, acctype, iswrite, size);
permissioncheck = TRUE; domaincheck = TRUE;
else
S1 = AArch32.TranslateAddressS1Off(vaddress, acctype, iswrite);
permissioncheck = FALSE; domaincheck = FALSE;
InGuardedPage = FALSE; // No memory is guarded when stage 1 address translation is disabled
if !IsFault(S1) &&(S1.addrdesc) && HasS2TranslationUsingAArch32() then
s2fs1walk = FALSE;
result =() && () && AArch32.ExecutingLSMInstr() then
if S1.addrdesc.memattrs.memtype == MemType_Device && S1.addrdesc.memattrs.device != DeviceType_GRE then
nTLSMD = if S1TranslationRegime() == EL2 then HSCTLR.nTLSMD else SCTLR.nTLSMD;
if nTLSMD == '0' then
S1.addrdesc.fault = AArch32.AlignmentFault(acctype, iswrite, secondstage);
// Check for unaligned data accesses to Device memory
if (((!wasaligned && acctype != AccType_IFETCH) || acctype == AccType_DCZVA) &&
!IsFault(S1.addrdesc) && S1.addrdesc.memattrs.memtype == MemType_Device) then
S1.addrdesc.fault = AArch32.AlignmentFault(acctype, iswrite, secondstage);
if !IsFault(S1.addrdesc) && domaincheck && !(acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_IC}) then
(permissioncheck, abort) = AArch32.CheckDomain(S1.domain, vaddress, S1.level, acctype,
iswrite);
S1.addrdesc.fault = abort;
if !IsFault(S1.addrdesc) && permissioncheck then
S1.addrdesc.fault = AArch32.CheckPermission(S1.perms, vaddress, S1.level,
S1.domain, S1.addrdesc.paddress.NS,
acctype,
iswrite);
// Check for instruction fetches from Device memory not marked as execute-never. If there has
// not been a Permission Fault then the memory is not marked execute-never.
if (!IsFault(S1.addrdesc) && S1.addrdesc.memattrs.memtype == MemType_Device &&
acctype == AccType_IFETCH) then
S1.addrdesc = AArch32.InstructionDeviceAArch32.SecondStageTranslateHaveTrapLoadStoreMultipleDeviceExt(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
size);
else
result = S1;
(S1.addrdesc, vaddress, ipaddress, S1.level,
S1.domain, acctype, iswrite,
secondstage, s2fs1walk);
return result; return S1.addrdesc;
// AArch32.SecondStageTranslate()
// ==============================
// Perform a stage 2 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.
// AArch32.FullTranslate()
// =======================
// Perform both stage 1 and stage 2 translation walks for the current translation regime. The
// function used by Address Translation operations is similar except it uses the translation
// regime specified for the instruction.
AddressDescriptor AArch32.SecondStageTranslate(AArch32.FullTranslate(bits(32) vaddress,AddressDescriptor S1, bits(32) vaddress,
AccType acctype, boolean iswrite, boolean wasaligned,
boolean s2fs1walk, integer size)
assertacctype, boolean iswrite,
boolean wasaligned, integer size)
// First Stage Translation
S1 = HasS2TranslationAArch32.FirstStageTranslate();
assert(vaddress, acctype, iswrite, wasaligned, size);
if ! IsZero(S1.paddress.address<47:40>);
hwupdatewalk = FALSE;
if !ELUsingAArch32(EL2) then
return AArch64.SecondStageTranslate(S1, ZeroExtend(vaddress, 64), acctype, iswrite,
wasaligned, s2fs1walk, size, hwupdatewalk);
s2_enabled = HCR.VM == '1' || HCR.DC == '1';
secondstage = TRUE;
if s2_enabled then // Second stage enabled
permissioncheck = TRUE;
ipaddress = S1.paddress.address<39:0>;
S2 = AArch32.TranslationTableWalkLD(ipaddress, vaddress, acctype, iswrite, secondstage,
s2fs1walk, size);
// Check for unaligned data accesses to Device memory
if !IsFault(S2.addrdesc) && !s2fs1walk && S2.addrdesc.memattrs.memtype ==(S1) && MemType_DeviceHasS2Translation
&& ((!wasaligned && acctype !=() then
s2fs1walk = FALSE;
result = AccType_IFETCHAArch32.SecondStageTranslate) || acctype == AccType_DCZVA) then
S2.addrdesc.fault = AArch32.AlignmentFault(acctype, iswrite, secondstage);
// Check for permissions on Stage2 translations
if !IsFault(S2.addrdesc) && permissioncheck then
S2.addrdesc.fault = AArch32.CheckS2Permission(S2.perms, vaddress, ipaddress, S2.level,
acctype, iswrite, s2fs1walk);
// Check for instruction fetches from Device memory not marked as execute-never. As there
// has not been a Permission Fault then the memory is not marked execute-never.
if (!s2fs1walk && !IsFault(S2.addrdesc) && S2.addrdesc.memattrs.memtype == MemType_Device &&
acctype == AccType_IFETCH) then
domain = bits(4) UNKNOWN;
S2.addrdesc = AArch32.InstructionDevice(S2.addrdesc, vaddress, ipaddress, S2.level,
domain, acctype, iswrite,
secondstage, s2fs1walk);
if (s2fs1walk && !IsFault(S2.addrdesc) &&
S2.addrdesc.memattrs.memtype == MemType_Device) then
// Check for protected table walk.
if HCR.PTW == '1' then
domain = bits(4) UNKNOWN;
S2.addrdesc.fault = AArch32.PermissionFault(ipaddress,
domain, S2.level,
acctype, iswrite, secondstage, s2fs1walk);
else
// Translation table walk occurs as Normal Non-cacheable memory.
S2.addrdesc.memattrs.memtype = MemType_Normal;
S2.addrdesc.memattrs.inner.attrs = MemAttr_NC;
S2.addrdesc.memattrs.outer.attrs = MemAttr_NC;
S2.addrdesc.memattrs.shareable = TRUE;
S2.addrdesc.memattrs.outershareable = TRUE;
if s2fs1walk then
result = AArch32.CombineS1S2Desc(S1, S2.addrdesc, AccType_TTW);
else
result = AArch32.CombineS1S2Desc(S1, S2.addrdesc, acctype);
(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
size);
else
result = S1;
return result;
// AArch32.SecondStageWalk()
// =========================
// Perform a stage 2 translation on a stage 1 translation table walk access.
// AArch32.SecondStageTranslate()
// ==============================
// Perform a stage 2 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.
AddressDescriptor AArch32.SecondStageWalk(AArch32.SecondStageTranslate(AddressDescriptor S1, bits(32) vaddress, AccType acctype,
boolean iswrite, integer size)
acctype, boolean iswrite, boolean wasaligned,
boolean s2fs1walk, integer size)
assert HasS2Translation();
s2fs1walk = TRUE;
wasaligned = TRUE;
return assert (S1.paddress.address<47:40>);
hwupdatewalk = FALSE;
if !ELUsingAArch32(EL2) then
return AArch64.SecondStageTranslate(S1, ZeroExtend(vaddress, 64), acctype, iswrite,
wasaligned, s2fs1walk, size, hwupdatewalk);
s2_enabled = HCR.VM == '1' || HCR.DC == '1';
secondstage = TRUE;
if s2_enabled then // Second stage enabled
ipaddress = S1.paddress.address<39:0>;
S2 = AArch32.TranslationTableWalkLD(ipaddress, vaddress, acctype, iswrite, secondstage,
s2fs1walk, size);
// Check for unaligned data accesses to Device memory
if (((!wasaligned && acctype != AccType_IFETCH) ||
(acctype == AccType_DCZVA && !s2fs1walk)) &&
S2.addrdesc.memattrs.memtype == MemType_Device && !IsFault(S2.addrdesc)) then
S2.addrdesc.fault = AArch32.AlignmentFault(acctype, iswrite, secondstage);
// Check for permissions on Stage2 translations
if !IsFault(S2.addrdesc) then
S2.addrdesc.fault = AArch32.CheckS2Permission(S2.perms, vaddress, ipaddress, S2.level,
acctype, iswrite, s2fs1walk);
// Check for instruction fetches from Device memory not marked as execute-never. As there
// has not been a Permission Fault then the memory is not marked execute-never.
if (!s2fs1walk && !IsFault(S2.addrdesc) && S2.addrdesc.memattrs.memtype == MemType_Device &&
acctype == AccType_IFETCH) then
domain = bits(4) UNKNOWN;
S2.addrdesc = AArch32.InstructionDevice(S2.addrdesc, vaddress, ipaddress, S2.level,
domain, acctype, iswrite,
secondstage, s2fs1walk);
if (s2fs1walk && !IsFault(S2.addrdesc) &&
S2.addrdesc.memattrs.memtype == MemType_Device) then
// Check for protected table walk.
if HCR.PTW == '1' then
domain = bits(4) UNKNOWN;
S2.addrdesc.fault = AArch32.PermissionFault(ipaddress,
domain, S2.level,
acctype, iswrite, secondstage, s2fs1walk);
else
// Translation table walk occurs as Normal Non-cacheable memory.
S2.addrdesc.memattrs.memtype = MemType_Normal;
S2.addrdesc.memattrs.inner.attrs = MemAttr_NC;
S2.addrdesc.memattrs.outer.attrs = MemAttr_NC;
S2.addrdesc.memattrs.shareable = TRUE;
S2.addrdesc.memattrs.outershareable = TRUE;
if s2fs1walk then
result = AArch32.CombineS1S2Desc(S1, S2.addrdesc, AccType_TTW);
else
result = AArch32.CombineS1S2DescAArch32.SecondStageTranslateIsZero(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
size);(S1, S2.addrdesc, acctype);
else
result = S1;
return result;
// AArch32.TranslateAddress()
// ==========================
// Main entry point for translating an address
// AArch32.SecondStageWalk()
// =========================
// Perform a stage 2 translation on a stage 1 translation table walk access.
AddressDescriptor AArch32.TranslateAddress(bits(32) vaddress,AArch32.SecondStageWalk( AddressDescriptor S1, bits(32) vaddress, AccType acctype, boolean iswrite,
boolean wasaligned, integer size)
acctype,
boolean iswrite, integer size)
if ! assertELUsingAArch32HasS2Translation(();
s2fs1walk = TRUE;
wasaligned = TRUE;
returnS1TranslationRegimeAArch32.SecondStageTranslate()) then
return AArch64.TranslateAddress(ZeroExtend(vaddress, 64), acctype, iswrite, wasaligned,
size);
result = AArch32.FullTranslate(vaddress, acctype, iswrite, wasaligned, size);
if !IsFault(result) then
result.fault = AArch32.CheckDebug(vaddress, acctype, iswrite, size);
// Update virtual address for abort functions
result.vaddress = ZeroExtend(vaddress);
return result;(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
size);
// AArch32.TranslationTableWalkLD()
// ================================
// Returns a result of a translation table walk using the Long-descriptor format
//
// Implementations might cache information from memory in any number of non-coherent TLB
// caching structures, and so avoid memory accesses that have been expressed in this
// pseudocode. The use of such TLBs is not expressed in this pseudocode.
// AArch32.TranslateAddress()
// ==========================
// Main entry point for translating an address
TLBRecordAddressDescriptor AArch32.TranslationTableWalkLD(bits(40) ipaddress, bits(32) vaddress,AArch32.TranslateAddress(bits(32) vaddress,
AccType acctype, boolean iswrite, boolean secondstage,
boolean s2fs1walk, integer size)
if !secondstage then
assertacctype, boolean iswrite,
boolean wasaligned, integer size)
if ! ELUsingAArch32(S1TranslationRegime());
else
assert()) then
return HaveELAArch64.TranslateAddress(EL2) && !IsSecure() && ELUsingAArch32(EL2) && HasS2Translation();
TLBRecord result;
AddressDescriptor descaddr;
bits(64) baseregister;
bits(40) inputaddr; // Input Address is 'vaddress' for stage 1, 'ipaddress' for stage 2
bit nswalk; // Stage 2 translation table walks are to Secure or to Non-secure PA space
result.descupdate.AF = FALSE;
result.descupdate.AP = FALSE;
domain = bits(4) UNKNOWN;
descaddr.memattrs.memtype = MemType_Normal;
// Fixed parameters for the translation table walk:
// grainsize = Log2(Size of Table) - Size of Table is 4KB in AArch32
// stride = Log2(Address per Level) - Bits of address consumed at each level
constant integer grainsize = 12; // Log2(4KB page size)
constant integer stride = grainsize - 3; // Log2(page size / 8 bytes)
// Derived parameters for the translation table walk:
// inputsize = Log2(Size of Input Address) - Input Address size in bits
// level = Level to start walk from
// This means that the number of levels after start level = 3-level
if !secondstage then
// First stage translation
inputaddr = ZeroExtend(vaddress);
el =(vaddress, 64), acctype, iswrite, wasaligned,
size);
result = AArch32.AccessUsesELAArch32.FullTranslate(acctype);
isprivileged =(vaddress, acctype, iswrite, wasaligned, size);
if ! AArch32.AccessUsesEL(acctype) != EL0;
if el == EL2 then
inputsize = 32 - UInt(HTCR.T0SZ);
basefound = inputsize == 32 || IsZero(inputaddr<31:inputsize>);
disabled = FALSE;
baseregister = HTTBR;
descaddr.memattrs = WalkAttrDecode(HTCR.SH0, HTCR.ORGN0, HTCR.IRGN0, secondstage);
reversedescriptors = HSCTLR.EE == '1';
lookupsecure = FALSE;
singlepriv = TRUE;
hierattrsdisabled = AArch32.HaveHPDExt() && HTCR.HPD == '1';
else
basefound = FALSE;
disabled = FALSE;
t0size = UInt(TTBCR.T0SZ);
if t0size == 0 || IsZero(inputaddr<31:(32-t0size)>) then
inputsize = 32 - t0size;
basefound = TRUE;
baseregister = TTBR0;
descaddr.memattrs = WalkAttrDecode(TTBCR.SH0, TTBCR.ORGN0, TTBCR.IRGN0, secondstage);
hierattrsdisabled = AArch32.HaveHPDExt() && TTBCR.T2E == '1' && TTBCR2.HPD0 == '1';
t1size = UInt(TTBCR.T1SZ);
if (t1size == 0 && !basefound) || (t1size > 0 && IsOnes(inputaddr<31:(32-t1size)>)) then
inputsize = 32 - t1size;
basefound = TRUE;
baseregister = TTBR1;
descaddr.memattrs = WalkAttrDecode(TTBCR.SH1, TTBCR.ORGN1, TTBCR.IRGN1, secondstage);
hierattrsdisabled = AArch32.HaveHPDExt() && TTBCR.T2E == '1' && TTBCR2.HPD1 == '1';
reversedescriptors = SCTLR.EE == '1';
lookupsecure = IsSecure();
singlepriv = FALSE;
// The starting level is the number of strides needed to consume the input address
level = 4 - (1 + ((inputsize - grainsize - 1) DIV stride));
else
// Second stage translation
inputaddr = ipaddress;
inputsize = 32 - SInt(VTCR.T0SZ);
// VTCR.S must match VTCR.T0SZ[3]
if VTCR.S != VTCR.T0SZ<3> then
(-, inputsize) = ConstrainUnpredictableInteger(32-7, 32+8, Unpredictable_RESVTCRS);
basefound = inputsize == 40 || IsZero(inputaddr<39:inputsize>);
disabled = FALSE;
descaddr.memattrs = WalkAttrDecode(VTCR.SH0, VTCR.ORGN0, VTCR.IRGN0, secondstage);
reversedescriptors = HSCTLR.EE == '1';
singlepriv = TRUE;
lookupsecure = FALSE;
baseregister = VTTBR;
startlevel = UInt(VTCR.SL0);
level = 2 - startlevel;
if level <= 0 then basefound = FALSE;
// Number of entries in the starting level table =
// (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
startsizecheck = inputsize - ((3 - level)*stride + grainsize); // Log2(Num of entries)
// Check for starting level table with fewer than 2 entries or longer than 16 pages.
// Lower bound check is: startsizecheck < Log2(2 entries)
// That is, VTCR.SL0 == '00' and SInt(VTCR.T0SZ) > 1, Size of Input Address < 2^31 bytes
// Upper bound check is: startsizecheck > Log2(pagesize/8*16)
// That is, VTCR.SL0 == '01' and SInt(VTCR.T0SZ) < -2, Size of Input Address > 2^34 bytes
if startsizecheck < 1 || startsizecheck > stride + 4 then basefound = FALSE;
if !basefound || disabled then
level = 1; // AArch64 reports this as a level 0 fault
result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype, iswrite,
secondstage, s2fs1walk);
return result;
if !IsZero(baseregister<47:40>) then
level = 0;
result.addrdesc.fault = AArch32.AddressSizeFault(ipaddress, domain, level, acctype, iswrite,
secondstage, s2fs1walk);
return result;
// Bottom bound of the Base address is:
// Log2(8 bytes per entry)+Log2(Number of entries in starting level table)
// Number of entries in starting level table =
// (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
baselowerbound = 3 + inputsize - ((3-level)*stride + grainsize); // Log2(Num of entries*8)
baseaddress = baseregister<39:baselowerbound>:Zeros(baselowerbound);
ns_table = if lookupsecure then '0' else '1';
ap_table = '00';
xn_table = '0';
pxn_table = '0';
addrselecttop = inputsize - 1;
repeat
addrselectbottom = (3-level)*stride + grainsize;
bits(40) index = ZeroExtend(inputaddr<addrselecttop:addrselectbottom>:'000');
descaddr.paddress.address = ZeroExtend(baseaddress OR index);
descaddr.paddress.NS = ns_table;
// If there are two stages of translation, then the first stage table walk addresses
// are themselves subject to translation
if secondstage || !HasS2Translation() then
descaddr2 = descaddr;
else
descaddr2 = AArch32.SecondStageWalk(descaddr, vaddress, acctype, iswrite, 8);
// Check for a fault on the stage 2 walk
if IsFault(descaddr2) then
result.addrdesc.fault = descaddr2.fault;
return result;
// Update virtual address for abort functions
descaddr2.vaddress =(result) then
result.fault = ZeroExtendAArch32.CheckDebug(vaddress);
(vaddress, acctype, iswrite, size);
accdesc = // Update virtual address for abort functions
result.vaddress = CreateAccessDescriptorTTW(acctype, secondstage, s2fs1walk, level);
desc = _Mem[descaddr2, 8, accdesc, iswrite];
if reversedescriptors then desc = BigEndianReverse(desc);
if desc<0> == '0' || (desc<1:0> == '01' && level == 3) then
// Fault (00), Reserved (10), or Block (01) at level 3.
result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
// Valid Block, Page, or Table entry
if desc<1:0> == '01' || level == 3 then // Block (01) or Page (11)
blocktranslate = TRUE;
else // Table (11)
if !IsZero(desc<47:40>) then
result.addrdesc.fault = AArch32.AddressSizeFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
baseaddress = desc<39:grainsize>:Zeros(grainsize);
if !secondstage then
// Unpack the upper and lower table attributes
ns_table = ns_table OR desc<63>;
if !secondstage && !hierattrsdisabled then
ap_table<1> = ap_table<1> OR desc<62>; // read-only
xn_table = xn_table OR desc<60>;
// pxn_table and ap_table[0] apply only in EL1&0 translation regimes
if !singlepriv then
pxn_table = pxn_table OR desc<59>;
ap_table<0> = ap_table<0> OR desc<61>; // privileged
level = level + 1;
addrselecttop = addrselectbottom - 1;
blocktranslate = FALSE;
until blocktranslate;
// Unpack the descriptor into address and upper and lower block attributes
outputaddress = desc<39:addrselectbottom>:inputaddr<addrselectbottom-1:0>;
// Check the output address is inside the supported range
if !IsZero(desc<47:40>) then
result.addrdesc.fault = AArch32.AddressSizeFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
// Check the access flag
if desc<10> == '0' then
result.addrdesc.fault = AArch32.AccessFlagFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
xn = desc<54>; // Bit[54] of the block/page descriptor holds UXN
pxn = desc<53>; // Bit[53] of the block/page descriptor holds PXN
ap = desc<7:6>:'1'; // Bits[7:6] of the block/page descriptor hold AP[2:1]
contiguousbit = desc<52>;
nG = desc<11>;
sh = desc<9:8>;
memattr = desc<5:2>; // AttrIndx and NS bit in stage 1
result.domain = bits(4) UNKNOWN; // Domains not used
result.level = level;
result.blocksize = 2^((3-level)*stride + grainsize);
// Stage 1 translation regimes also inherit attributes from the tables
if !secondstage then
result.perms.xn = xn OR xn_table;
result.perms.ap<2> = ap<2> OR ap_table<1>; // Force read-only
// PXN, nG and AP[1] apply only in EL1&0 stage 1 translation regimes
if !singlepriv then
result.perms.ap<1> = ap<1> AND NOT(ap_table<0>); // Force privileged only
result.perms.pxn = pxn OR pxn_table;
// Pages from Non-secure tables are marked non-global in Secure EL1&0
if IsSecure() then
result.nG = nG OR ns_table;
else
result.nG = nG;
else
result.perms.ap<1> = '1';
result.perms.pxn = '0';
result.nG = '0';
result.GP = desc<50>; // Stage 1 block or pages might be guarded
result.perms.ap<0> = '1';
result.addrdesc.memattrs = AArch32.S1AttrDecode(sh, memattr<2:0>, acctype);
result.addrdesc.paddress.NS = memattr<3> OR ns_table;
else
result.perms.ap<2:1> = ap<2:1>;
result.perms.ap<0> = '1';
result.perms.xn = xn;
if HaveExtendedExecuteNeverExt() then result.perms.xxn = desc<53>;
result.perms.pxn = '0';
result.nG = '0';
if s2fs1walk then
result.addrdesc.memattrs = S2AttrDecode(sh, memattr, AccType_TTW);
else
result.addrdesc.memattrs = S2AttrDecode(sh, memattr, acctype);
result.addrdesc.paddress.NS = '1';
result.addrdesc.paddress.address = ZeroExtend(outputaddress);
result.addrdesc.fault = AArch32.NoFault();
result.contiguous = contiguousbit == '1';
if HaveCommonNotPrivateTransExt() then result.CnP = baseregister<0>;
(vaddress);
return result;
// AArch32.TranslationTableWalkLD()
// ================================
// Returns a result of a translation table walk using the Long-descriptor format
//
// Implementations might cache information from memory in any number of non-coherent TLB
// caching structures, and so avoid memory accesses that have been expressed in this
// pseudocode. The use of such TLBs is not expressed in this pseudocode.
//
// NOTE(review): this span was htmldiff residue (old SD walk and new LD walk interleaved).
// The text below is the new-version function reconstructed from the diff's added lines;
// verify against the Arm ARM shared pseudocode before relying on it.

TLBRecord AArch32.TranslationTableWalkLD(bits(40) ipaddress, bits(32) vaddress,
                                         AccType acctype, boolean iswrite, boolean secondstage,
                                         boolean s2fs1walk, integer size)
    if !secondstage then
        assert ELUsingAArch32(S1TranslationRegime());
    else
        assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2) && HasS2Translation();

    TLBRecord result;
    AddressDescriptor descaddr;
    bits(64) baseregister;
    bits(40) inputaddr;    // Input Address is 'vaddress' for stage 1, 'ipaddress' for stage 2
    bit nswalk;            // Stage 2 translation table walks are to Secure or to Non-secure PA space

    result.descupdate.AF = FALSE;
    result.descupdate.AP = FALSE;

    domain = bits(4) UNKNOWN;

    descaddr.memattrs.memtype = MemType_Normal;

    // Fixed parameters for the translation table walk:
    //  grainsize = Log2(Size of Table)         - Size of Table is 4KB in AArch32
    //  stride = Log2(Address per Level)        - Bits of address consumed at each level
    constant integer grainsize = 12;                    // Log2(4KB page size)
    constant integer stride = grainsize - 3;            // Log2(page size / 8 bytes)

    // Derived parameters for the translation table walk:
    //  inputsize = Log2(Size of Input Address) - Input Address size in bits
    //  level = Level to start walk from
    // This means that the number of levels after start level = 3-level

    if !secondstage then
        // First stage translation
        inputaddr = ZeroExtend(vaddress);
        el = AArch32.AccessUsesEL(acctype);
        isprivileged = AArch32.AccessUsesEL(acctype) != EL0;
        if el == EL2 then
            inputsize = 32 - UInt(HTCR.T0SZ);
            basefound = inputsize == 32 || IsZero(inputaddr<31:inputsize>);
            disabled = FALSE;
            baseregister = HTTBR;
            descaddr.memattrs = WalkAttrDecode(HTCR.SH0, HTCR.ORGN0, HTCR.IRGN0, secondstage);
            reversedescriptors = HSCTLR.EE == '1';
            lookupsecure = FALSE;
            singlepriv = TRUE;
            hierattrsdisabled = AArch32.HaveHPDExt() && HTCR.HPD == '1';
        else
            basefound = FALSE;
            disabled = FALSE;
            t0size = UInt(TTBCR.T0SZ);
            if t0size == 0 || IsZero(inputaddr<31:(32-t0size)>) then
                inputsize = 32 - t0size;
                basefound = TRUE;
                baseregister = TTBR0;
                descaddr.memattrs = WalkAttrDecode(TTBCR.SH0, TTBCR.ORGN0, TTBCR.IRGN0, secondstage);
                hierattrsdisabled = AArch32.HaveHPDExt() && TTBCR.T2E == '1' && TTBCR2.HPD0 == '1';
            t1size = UInt(TTBCR.T1SZ);
            if (t1size == 0 && !basefound) || (t1size > 0 && IsOnes(inputaddr<31:(32-t1size)>)) then
                inputsize = 32 - t1size;
                basefound = TRUE;
                baseregister = TTBR1;
                descaddr.memattrs = WalkAttrDecode(TTBCR.SH1, TTBCR.ORGN1, TTBCR.IRGN1, secondstage);
                hierattrsdisabled = AArch32.HaveHPDExt() && TTBCR.T2E == '1' && TTBCR2.HPD1 == '1';
            reversedescriptors = SCTLR.EE == '1';
            lookupsecure = IsSecure();
            singlepriv = FALSE;
        // The starting level is the number of strides needed to consume the input address
        level = 4 - (1 + ((inputsize - grainsize - 1) DIV stride));
    else
        // Second stage translation
        inputaddr = ipaddress;
        inputsize = 32 - SInt(VTCR.T0SZ);
        // VTCR.S must match VTCR.T0SZ[3]
        if VTCR.S != VTCR.T0SZ<3> then
            (-, inputsize) = ConstrainUnpredictableInteger(32-7, 32+8, Unpredictable_RESVTCRS);
        basefound = inputsize == 40 || IsZero(inputaddr<39:inputsize>);
        disabled = FALSE;
        descaddr.memattrs = WalkAttrDecode(VTCR.SH0, VTCR.ORGN0, VTCR.IRGN0, secondstage);
        reversedescriptors = HSCTLR.EE == '1';
        singlepriv = TRUE;

        lookupsecure = FALSE;
        baseregister = VTTBR;
        startlevel = UInt(VTCR.SL0);
        level = 2 - startlevel;
        if level <= 0 then basefound = FALSE;

        // Number of entries in the starting level table =
        //     (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
        startsizecheck = inputsize - ((3 - level)*stride + grainsize); // Log2(Num of entries)

        // Check for starting level table with fewer than 2 entries or longer than 16 pages.
        // Lower bound check is:  startsizecheck < Log2(2 entries)
        //   That is, VTCR.SL0 == '00' and SInt(VTCR.T0SZ) > 1, Size of Input Address < 2^31 bytes
        // Upper bound check is:  startsizecheck > Log2(pagesize/8*16)
        //   That is, VTCR.SL0 == '01' and SInt(VTCR.T0SZ) < -2, Size of Input Address > 2^34 bytes
        if startsizecheck < 1 || startsizecheck > stride + 4 then basefound = FALSE;

    if !basefound || disabled then
        level = 1;            // AArch64 reports this as a level 0 fault
        result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype, iswrite,
                                                         secondstage, s2fs1walk);
        return result;

    if !IsZero(baseregister<47:40>) then
        level = 0;
        result.addrdesc.fault = AArch32.AddressSizeFault(ipaddress, domain, level, acctype, iswrite,
                                                         secondstage, s2fs1walk);
        return result;

    // Bottom bound of the Base address is:
    //     Log2(8 bytes per entry)+Log2(Number of entries in starting level table)
    // Number of entries in starting level table =
    //     (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
    baselowerbound = 3 + inputsize - ((3-level)*stride + grainsize); // Log2(Num of entries*8)
    baseaddress = baseregister<39:baselowerbound>:Zeros(baselowerbound);

    ns_table = if lookupsecure then '0' else '1';
    ap_table = '00';
    xn_table = '0';
    pxn_table = '0';

    addrselecttop = inputsize - 1;

    repeat
        addrselectbottom = (3-level)*stride + grainsize;

        bits(40) index = ZeroExtend(inputaddr<addrselecttop:addrselectbottom>:'000');
        descaddr.paddress.address = ZeroExtend(baseaddress OR index);
        descaddr.paddress.NS = ns_table;

        // If there are two stages of translation, then the first stage table walk addresses
        // are themselves subject to translation
        if secondstage || !HasS2Translation() then
            descaddr2 = descaddr;
        else
            descaddr2 = AArch32.SecondStageWalk(descaddr, vaddress, acctype, iswrite, 8);
            // Check for a fault on the stage 2 walk
            if IsFault(descaddr2) then
                result.addrdesc.fault = descaddr2.fault;
                return result;

        // Update virtual address for abort functions
        descaddr2.vaddress = ZeroExtend(vaddress);

        accdesc = CreateAccessDescriptorTTW(acctype, secondstage, s2fs1walk, level);
        desc = _Mem[descaddr2, 8, accdesc, iswrite];

        if reversedescriptors then desc = BigEndianReverse(desc);

        if desc<0> == '0' || (desc<1:0> == '01' && level == 3) then
            // Fault (00), Reserved (10), or Block (01) at level 3.
            result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype,
                                                             iswrite, secondstage, s2fs1walk);
            return result;

        // Valid Block, Page, or Table entry
        if desc<1:0> == '01' || level == 3 then                 // Block (01) or Page (11)
            blocktranslate = TRUE;
        else                                                    // Table (11)
            if !IsZero(desc<47:40>) then
                result.addrdesc.fault = AArch32.AddressSizeFault(ipaddress, domain, level, acctype,
                                                                 iswrite, secondstage, s2fs1walk);
                return result;

            baseaddress = desc<39:grainsize>:Zeros(grainsize);
            if !secondstage then
                // Unpack the upper and lower table attributes
                ns_table    = ns_table    OR desc<63>;
            if !secondstage && !hierattrsdisabled then
                ap_table<1> = ap_table<1> OR desc<62>;          // read-only

                xn_table    = xn_table    OR desc<60>;
                // pxn_table and ap_table[0] apply only in EL1&0 translation regimes
                if !singlepriv then
                    pxn_table   = pxn_table   OR desc<59>;
                    ap_table<0> = ap_table<0> OR desc<61>;      // privileged

            level = level + 1;
            addrselecttop = addrselectbottom - 1;
            blocktranslate = FALSE;
    until blocktranslate;

    // Unpack the descriptor into address and upper and lower block attributes
    outputaddress = desc<39:addrselectbottom>:inputaddr<addrselectbottom-1:0>;

    // Check the output address is inside the supported range
    if !IsZero(desc<47:40>) then
        result.addrdesc.fault = AArch32.AddressSizeFault(ipaddress, domain, level, acctype,
                                                         iswrite, secondstage, s2fs1walk);
        return result;

    // Check the access flag
    if desc<10> == '0' then
        result.addrdesc.fault = AArch32.AccessFlagFault(ipaddress, domain, level, acctype,
                                                        iswrite, secondstage, s2fs1walk);
        return result;
    xn = desc<54>;                                              // Bit[54] of the block/page descriptor holds UXN
    pxn = desc<53>;                                             // Bit[53] of the block/page descriptor holds PXN
    ap = desc<7:6>:'1';                                         // Bits[7:6] of the block/page descriptor hold AP[2:1]
    contiguousbit = desc<52>;
    nG = desc<11>;
    sh = desc<9:8>;
    memattr = desc<5:2>;                                        // AttrIndx and NS bit in stage 1

    result.domain = bits(4) UNKNOWN;                            // Domains not used
    result.level = level;
    result.blocksize = 2^((3-level)*stride + grainsize);

    // Stage 1 translation regimes also inherit attributes from the tables
    if !secondstage then
        result.perms.xn      = xn OR xn_table;
        result.perms.ap<2>   = ap<2> OR ap_table<1>;            // Force read-only
        // PXN, nG and AP[1] apply only in EL1&0 stage 1 translation regimes
        if !singlepriv then
            result.perms.ap<1> = ap<1> AND NOT(ap_table<0>);    // Force privileged only
            result.perms.pxn   = pxn OR pxn_table;
            // Pages from Non-secure tables are marked non-global in Secure EL1&0
            if IsSecure() then
                result.nG = nG OR ns_table;
            else
                result.nG = nG;
        else
            result.perms.ap<1> = '1';
            result.perms.pxn   = '0';
            result.nG          = '0';
        result.GP = desc<50>;                                   // Stage 1 block or pages might be guarded
        result.perms.ap<0>   = '1';
        result.addrdesc.memattrs = AArch32.S1AttrDecode(sh, memattr<2:0>, acctype);
        result.addrdesc.paddress.NS = memattr<3> OR ns_table;
    else
        result.perms.ap<2:1> = ap<2:1>;
        result.perms.ap<0>   = '1';
        result.perms.xn      = xn;
        if HaveExtendedExecuteNeverExt() then result.perms.xxn = desc<53>;
        result.perms.pxn     = '0';
        result.nG            = '0';
        if s2fs1walk then
            result.addrdesc.memattrs = S2AttrDecode(sh, memattr, AccType_TTW);
        else
            result.addrdesc.memattrs = S2AttrDecode(sh, memattr, acctype);
        result.addrdesc.paddress.NS = '1';

    result.addrdesc.paddress.address = ZeroExtend(outputaddress);
    result.addrdesc.fault = AArch32.NoFault();
    result.contiguous = contiguousbit == '1';
    if HaveCommonNotPrivateTransExt() then result.CnP = baseregister<0>;

    return result;
// AArch32.TranslationTableWalkSD()
// ================================
// Returns a result of a translation table walk using the Short-descriptor format
//
// Implementations might cache information from memory in any number of non-coherent TLB
// caching structures, and so avoid memory accesses that have been expressed in this
// pseudocode. The use of such TLBs is not expressed in this pseudocode.

TLBRecord AArch32.TranslationTableWalkSD(bits(32) vaddress, AccType acctype, boolean iswrite,
                                         integer size)
    assert ELUsingAArch32(S1TranslationRegime());

    // This is only called when address translation is enabled
    TLBRecord result;
    AddressDescriptor l1descaddr;
    AddressDescriptor l2descaddr;
    bits(40) outputaddress;

    // Variables for Abort functions
    ipaddress = bits(40) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    NS = bit UNKNOWN;

    // Default setting of the domain and level.
    domain = bits(4) UNKNOWN;
    level = 1;

    // Determine correct Translation Table Base Register to use.
    bits(64) ttbr;
    n = UInt(TTBCR.N);
    if n == 0 || IsZero(vaddress<31:(32-n)>) then
        ttbr = TTBR0;
        disabled = (TTBCR.PD0 == '1');
    else
        ttbr = TTBR1;
        disabled = (TTBCR.PD1 == '1');
        n = 0;  // TTBR1 translation always works like N=0 TTBR0 translation

    // Check if Translation table walk disabled for translations with this Base register.
    if disabled then
        result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype, iswrite,
                                                         secondstage, s2fs1walk);
        return result;

    // Obtain descriptor from initial lookup.
    l1descaddr.paddress.address = ZeroExtend(ttbr<31:14-n>:vaddress<31-n:20>:'00');
    l1descaddr.paddress.NS = if IsSecure() then '0' else '1';
    IRGN = ttbr<0>:ttbr<6>;              // TTBR.IRGN
    RGN = ttbr<4:3>;                     // TTBR.RGN
    SH = ttbr<1>:ttbr<5>;                // TTBR.S:TTBR.NOS
    l1descaddr.memattrs = WalkAttrDecode(SH, RGN, IRGN, secondstage);

    if !HasS2Translation() then
        // if only 1 stage of translation
        l1descaddr2 = l1descaddr;
    else
        l1descaddr2 = AArch32.SecondStageWalk(l1descaddr, vaddress, acctype, iswrite, 4);
        // Check for a fault on the stage 2 walk
        if IsFault(l1descaddr2) then
            result.addrdesc.fault = l1descaddr2.fault;
            return result;

    // Update virtual address for abort functions
    l1descaddr2.vaddress = ZeroExtend(vaddress);

    accdesc = CreateAccessDescriptorTTW(acctype, secondstage, s2fs1walk, level);
    l1desc = _Mem[l1descaddr2, 4, accdesc, iswrite];

    if SCTLR.EE == '1' then l1desc = BigEndianReverse(l1desc);

    // Process descriptor from initial lookup.
    case l1desc<1:0> of
        when '00'                                              // Fault, Reserved
            result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype,
                                                             iswrite, secondstage, s2fs1walk);
            return result;

        when '01'                                              // Large page or Small page
            domain = l1desc<8:5>;
            level = 2;
            pxn = l1desc<2>;
            NS = l1desc<3>;

            // Obtain descriptor from level 2 lookup.
            l2descaddr.paddress.address = ZeroExtend(l1desc<31:10>:vaddress<19:12>:'00');
            l2descaddr.paddress.NS = if IsSecure() then '0' else '1';
            l2descaddr.memattrs = l1descaddr.memattrs;

            if !HaveEL(EL2) || (IsSecure() && !IsSecureEL2Enabled()) then
                // if only 1 stage of translation
                l2descaddr2 = l2descaddr;
            else
                l2descaddr2 = AArch32.SecondStageWalk(l2descaddr, vaddress, acctype, iswrite, 4);
                // Check for a fault on the stage 2 walk
                if IsFault(l2descaddr2) then
                    result.addrdesc.fault = l2descaddr2.fault;
                    return result;

            // Update virtual address for abort functions
            l2descaddr2.vaddress = ZeroExtend(vaddress);

            accdesc = CreateAccessDescriptorTTW(acctype, secondstage, s2fs1walk, level);
            l2desc = _Mem[l2descaddr2, 4, accdesc, iswrite];

            if SCTLR.EE == '1' then l2desc = BigEndianReverse(l2desc);

            // Process descriptor from level 2 lookup.
            if l2desc<1:0> == '00' then
                result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype,
                                                                 iswrite, secondstage, s2fs1walk);
                return result;

            nG = l2desc<11>;
            S = l2desc<10>;
            ap = l2desc<9,5:4>;

            if SCTLR.AFE == '1' && l2desc<4> == '0' then
                // Armv8 VMSAv8-32 does not support hardware management of the Access flag.
                result.addrdesc.fault = AArch32.AccessFlagFault(ipaddress, domain, level, acctype,
                                                                iswrite, secondstage, s2fs1walk);
                return result;

            if l2desc<1> == '0' then                           // Large page
                xn = l2desc<15>;
                tex = l2desc<14:12>;
                c = l2desc<3>;
                b = l2desc<2>;
                blocksize = 64;
                outputaddress = ZeroExtend(l2desc<31:16>:vaddress<15:0>);
            else                                               // Small page
                tex = l2desc<8:6>;
                c = l2desc<3>;
                b = l2desc<2>;
                xn = l2desc<0>;
                blocksize = 4;
                outputaddress = ZeroExtend(l2desc<31:12>:vaddress<11:0>);

        when '1x'                                              // Section or Supersection
            NS = l1desc<19>;
            nG = l1desc<17>;
            S = l1desc<16>;
            ap = l1desc<15,11:10>;
            tex = l1desc<14:12>;
            xn = l1desc<4>;
            c = l1desc<3>;
            b = l1desc<2>;
            pxn = l1desc<0>;
            level = 1;

            if SCTLR.AFE == '1' && l1desc<10> == '0' then
                // Armv8 VMSAv8-32 does not support hardware management of the Access flag.
                result.addrdesc.fault = AArch32.AccessFlagFault(ipaddress, domain, level, acctype,
                                                                iswrite, secondstage, s2fs1walk);
                return result;

            if l1desc<18> == '0' then                          // Section
                domain = l1desc<8:5>;
                blocksize = 1024;
                outputaddress = ZeroExtend(l1desc<31:20>:vaddress<19:0>);
            else                                               // Supersection
                domain = '0000';
                blocksize = 16384;
                outputaddress = l1desc<8:5>:l1desc<23:20>:l1desc<31:24>:vaddress<23:0>;

    // Decode the TEX, C, B and S bits to produce the TLBRecord's memory attributes
    if SCTLR.TRE == '0' then
        if RemapRegsHaveResetValues() then
            result.addrdesc.memattrs = AArch32.DefaultTEXDecode(tex, c, b, S, acctype);
        else
            result.addrdesc.memattrs = MemoryAttributes IMPLEMENTATION_DEFINED;
    else
        result.addrdesc.memattrs = AArch32.RemappedTEXDecode(tex, c, b, S, acctype);

    // Set the rest of the TLBRecord, try to add it to the TLB, and return it.
    result.perms.ap = ap;
    result.perms.xn = xn;
    result.perms.pxn = pxn;
    result.nG = nG;
    result.domain = domain;
    result.level = level;
    result.blocksize = blocksize;
    result.addrdesc.paddress.address = ZeroExtend(outputaddress);
    result.addrdesc.paddress.NS = if IsSecure() then NS else '1';
    result.addrdesc.fault = AArch32.NoFault();

    return result;
// Declaration swallowed by the diff into the signature below; SD walk above calls it.
boolean RemapRegsHaveResetValues();

// AArch64.BreakpointMatch()
// =========================
// Breakpoint matching in an AArch64 translation regime.

boolean AArch64.BreakpointMatch(integer n, bits(64) vaddress, AccType acctype, integer size)
    assert !ELUsingAArch32(S1TranslationRegime());
    assert n < GetNumBreakpoints();

    enabled = DBGBCR_EL1[n].E == '1';
    ispriv = PSTATE.EL != EL0;
    linked = DBGBCR_EL1[n].BT == '0x01';
    isbreakpnt = TRUE;
    linked_to = FALSE;

    state_match = AArch64.StateMatch(DBGBCR_EL1[n].SSC, DBGBCR_EL1[n].HMC, DBGBCR_EL1[n].PMC,
                                     linked, DBGBCR_EL1[n].LBN, isbreakpnt, acctype, ispriv);
    value_match = AArch64.BreakpointValueMatch(n, vaddress, linked_to);

    if HaveAnyAArch32() && size == 4 then                      // Check second halfword
        // If the breakpoint address and BAS of an Address breakpoint match the address of the
        // second halfword of an instruction, but not the address of the first halfword, it is
        // CONSTRAINED UNPREDICTABLE whether or not this breakpoint generates a Breakpoint debug
        // event.
        match_i = AArch64.BreakpointValueMatch(n, vaddress + 2, linked_to);
        if !value_match && match_i then
            value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);

    if vaddress<1> == '1' && DBGBCR_EL1[n].BAS == '1111' then
        // The above notwithstanding, if DBGBCR_EL1[n].BAS == '1111', then it is CONSTRAINED
        // UNPREDICTABLE whether or not a Breakpoint debug event is generated for an instruction
        // at the address DBGBVR_EL1[n]+2.
        if value_match then value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);

    match = value_match && state_match && enabled;

    return match;
// AArch64.BreakpointValueMatch()
// ==============================
// NOTE(review): this span was htmldiff residue (new BreakpointValueMatch interleaved with the
// old BreakpointMatch). The text below is the new-version function reconstructed from the
// diff's added lines; verify against the Arm ARM shared pseudocode before relying on it.

boolean AArch64.BreakpointValueMatch(integer n, bits(64) vaddress, boolean linked_to)

    // "n" is the identity of the breakpoint unit to match against.
    // "vaddress" is the current instruction address, ignored if linked_to is TRUE and for Context
    //   matching breakpoints.
    // "linked_to" is TRUE if this is a call from StateMatch for linking.

    // If a non-existent breakpoint then it is CONSTRAINED UNPREDICTABLE whether this gives
    // no match or the breakpoint is mapped to another UNKNOWN implemented breakpoint.
    if n >= GetNumBreakpoints() then
        (c, n) = ConstrainUnpredictableInteger(0, GetNumBreakpoints() - 1, Unpredictable_BPNOTIMPL);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then return FALSE;

    // If this breakpoint is not enabled, it cannot generate a match. (This could also happen on a
    // call from StateMatch for linking).
    if DBGBCR_EL1[n].E == '0' then return FALSE;

    context_aware = (n >= (GetNumBreakpoints() - GetNumContextAwareBreakpoints()));

    // If BT is set to a reserved type, behaves either as disabled or as a not-reserved type.
    dbgtype = DBGBCR_EL1[n].BT;

    if ((dbgtype IN {'011x','11xx'} && !HaveVirtHostExt() && !HaveV82Debug()) || // Context matching
          dbgtype == '010x' ||                                                   // Reserved
          (dbgtype != '0x0x' && !context_aware) ||                               // Context matching
          (dbgtype == '1xxx' && !HaveEL(EL2))) then                              // EL2 extension
        (c, dbgtype) = ConstrainUnpredictableBits(Unpredictable_RESBPTYPE);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then return FALSE;
        // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value

    // Determine what to compare against.
    match_addr = (dbgtype == '0x0x');
    match_vmid = (dbgtype == '10xx');
    match_cid  = (dbgtype == '001x');
    match_cid1 = (dbgtype IN { '101x', 'x11x'});
    match_cid2 = (dbgtype == '11xx');
    linked     = (dbgtype == 'xxx1');

    // If this is a call from StateMatch, return FALSE if the breakpoint is not programmed for a
    // VMID and/or context ID match, of if not context-aware. The above assertions mean that the
    // code can just test for match_addr == TRUE to confirm all these things.
    if linked_to && (!linked || match_addr) then return FALSE;

    // If called from BreakpointMatch return FALSE for Linked context ID and/or VMID matches.
    if !linked_to && linked && !match_addr then return FALSE;

    // Do the comparison.
    if match_addr then
        byte = UInt(vaddress<1:0>);
        if HaveAnyAArch32() then
            // T32 instructions can be executed at EL0 in an AArch64 translation regime.
            assert byte IN {0,2};                 // "vaddress" is halfword aligned
            byte_select_match = (DBGBCR_EL1[n].BAS<byte> == '1');
        else
            assert byte == 0;                     // "vaddress" is word aligned
            byte_select_match = TRUE;             // DBGBCR_EL1[n].BAS<byte> is RES1
        // If the DBGxVR<n>_EL1.RESS field bits are not a sign extension of the MSB
        // of DBGBVR<n>_EL1.VA, it is UNPREDICTABLE whether they appear to be
        // included in the match.
        // If 'vaddress' is outside of the current virtual address space, then the access
        // generates a Translation fault.
        integer top = if Have52BitVAExt() then 52 else 48;
        if !IsOnes(DBGBVR_EL1[n]<63:top>) && !IsZero(DBGBVR_EL1[n]<63:top>) then
            if ConstrainUnpredictableBool(Unpredictable_DBGxVR_RESS) then
                top = 63;
        BVR_match = (vaddress<top:2> == DBGBVR_EL1[n]<top:2>) && byte_select_match;
    elsif match_cid then
        if IsInHost() then
            BVR_match = (CONTEXTIDR_EL2<31:0> == DBGBVR_EL1[n]<31:0>);
        else
            BVR_match = (PSTATE.EL IN {EL0, EL1} && CONTEXTIDR_EL1<31:0> == DBGBVR_EL1[n]<31:0>);
    elsif match_cid1 then
        BVR_match = (PSTATE.EL IN {EL0, EL1} && !IsInHost() && CONTEXTIDR_EL1<31:0> == DBGBVR_EL1[n]<31:0>);
    if match_vmid then
        if !Have16bitVMID() || VTCR_EL2.VS == '0' then
            vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16);
            bvr_vmid = ZeroExtend(DBGBVR_EL1[n]<39:32>, 16);
        else
            vmid = VTTBR_EL2.VMID;
            bvr_vmid = DBGBVR_EL1[n]<47:32>;
        BXVR_match = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                      !IsInHost() &&
                      vmid == bvr_vmid);
    elsif match_cid2 then
        BXVR_match = ((HaveVirtHostExt() || HaveV82Debug()) && EL2Enabled() &&
                      DBGBVR_EL1[n]<63:32> == CONTEXTIDR_EL2<31:0>);

    bvr_match_valid = (match_addr || match_cid || match_cid1);
    bxvr_match_valid = (match_vmid || match_cid2);

    match = (!bxvr_match_valid || BXVR_match) && (!bvr_match_valid || BVR_match);

    return match;
// AArch64.StateMatch()
// ====================
// Determine whether a breakpoint or watchpoint is enabled in the current mode and state.
// AArch64.BreakpointValueMatch()
// ==============================
boolean AArch64.StateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean linked, bits(4) LBN,
boolean isbreakpnt,AArch64.BreakpointValueMatch(integer n, bits(64) vaddress, boolean linked_to)
// "n" is the identity of the breakpoint unit to match against.
// "vaddress" is the current instruction address, ignored if linked_to is TRUE and for Context
// matching breakpoints.
// "linked_to" is TRUE if this is a call from StateMatch for linking.
// If a non-existent breakpoint then it is CONSTRAINED UNPREDICTABLE whether this gives
// no match or the breakpoint is mapped to another UNKNOWN implemented breakpoint.
if n > AccTypeUInt acctype, boolean ispriv)
// "SSC", "HMC", "PxC" are the control fields from the DBGBCR[n] or DBGWCR[n] register.
// "linked" is TRUE if this is a linked breakpoint/watchpoint type.
// "LBN" is the linked breakpoint number from the DBGBCR[n] or DBGWCR[n] register.
// "isbreakpnt" is TRUE for breakpoints, FALSE for watchpoints.
// "ispriv" is valid for watchpoints, and selects between privileged and unprivileged accesses.
// If parameters are set to a reserved type, behaves as either disabled or a defined type
(c, SSC, HMC, PxC) =(ID_AA64DFR0_EL1.BRPs) then
(c, n) = CheckValidStateMatchConstrainUnpredictableInteger(SSC, HMC, PxC, isbreakpnt);
if c ==(0, UInt(ID_AA64DFR0_EL1.BRPs), Unpredictable_BPNOTIMPL);
assert c IN {Constraint_DISABLED then return FALSE;
// Otherwise the HMC,SSC,PxC values are either valid or the values returned by
// CheckValidStateMatch are valid.
EL3_match =, HaveELConstraint_UNKNOWN(};
if c ==EL3Constraint_DISABLED) && HMC == '1' && SSC<0> == '0';
EL2_match =then return FALSE;
// If this breakpoint is not enabled, it cannot generate a match. (This could also happen on a
// call from StateMatch for linking).
if DBGBCR_EL1[n].E == '0' then return FALSE;
context_aware = (n >= UInt(ID_AA64DFR0_EL1.BRPs) - UInt(ID_AA64DFR0_EL1.CTX_CMPs));
// If BT is set to a reserved type, behaves either as disabled or as a not-reserved type.
dbgtype = DBGBCR_EL1[n].BT;
if ((dbgtype IN {'011x','11xx'} && !HaveVirtHostExt() && !HaveV82Debug()) || // Context matching
dbgtype == '010x' || // Reserved
(dbgtype != '0x0x' && !context_aware) || // Context matching
(dbgtype == '1xxx' && !HaveEL(EL2) && ((HMC == '1' && (SSC:PxC != '1000')) || SSC == '11');
EL1_match = PxC<0> == '1';
EL0_match = PxC<1> == '1';
if))) then // EL2 extension
(c, dbgtype) = HaveNV2ExtConstrainUnpredictableBits() && acctype ==( AccType_NV2REGISTERUnpredictable_RESBPTYPE && !isbreakpnt then
priv_match = EL2_match;
elsif !ispriv && !isbreakpnt then
priv_match = EL0_match;
else
case PSTATE.EL of
when);
assert c IN { EL3Constraint_DISABLED priv_match = EL3_match;
when, EL2Constraint_UNKNOWN priv_match = EL2_match;
when};
if c == EL1Constraint_DISABLED priv_match = EL1_match;
whenthen return FALSE;
// Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value
// Determine what to compare against.
match_addr = (dbgtype == '0x0x');
match_vmid = (dbgtype == '10xx');
match_cid = (dbgtype == '001x');
match_cid1 = (dbgtype IN { '101x', 'x11x'});
match_cid2 = (dbgtype == '11xx');
linked = (dbgtype == 'xxx1');
// If this is a call from StateMatch, return FALSE if the breakpoint is not programmed for a
// VMID and/or context ID match, of if not context-aware. The above assertions mean that the
// code can just test for match_addr == TRUE to confirm all these things.
if linked_to && (!linked || match_addr) then return FALSE;
// If called from BreakpointMatch return FALSE for Linked context ID and/or VMID matches.
if !linked_to && linked && !match_addr then return FALSE;
// Do the comparison.
if match_addr then
byte = UInt(vaddress<1:0>);
if HaveAnyAArch32() then
// T32 instructions can be executed at EL0 in an AArch64 translation regime.
assert byte IN {0,2}; // "vaddress" is halfword aligned
byte_select_match = (DBGBCR_EL1[n].BAS<byte> == '1');
else
assert byte == 0; // "vaddress" is word aligned
byte_select_match = TRUE; // DBGBCR_EL1[n].BAS<byte> is RES1
// If the DBGxVR<n>_EL1.RESS field bits are not a sign extension of the MSB
// of DBGBVR<n>_EL1.VA, it is UNPREDICTABLE whether they appear to be
// included in the match.
// If 'vaddress' is outside of the current virtual address space, then the access
// generates a Translation fault.
integer top = if Have52BitVAExt() then 52 else 48;
if !IsOnes(DBGBVR_EL1[n]<63:top>) && !IsZero(DBGBVR_EL1[n]<63:top>) then
if ConstrainUnpredictableBool(Unpredicatable_DBGxVR_RESS) then
top = 63;
BVR_match = (vaddress<top:2> == DBGBVR_EL1[n]<top:2>) && byte_select_match;
elsif match_cid then
if IsInHost() then
BVR_match = (CONTEXTIDR_EL2<31:0> == DBGBVR_EL1[n]<31:0>);
else
BVR_match = (PSTATE.EL IN {EL0 priv_match = EL0_match;
case SSC of
when '00' security_state_match = TRUE; // Both
when '01' security_state_match = !,IsSecureEL1(); // Non-secure only
when '10' security_state_match =} && CONTEXTIDR_EL1<31:0> == DBGBVR_EL1[n]<31:0>);
elsif match_cid1 then
BVR_match = (PSTATE.EL IN { IsSecureEL0(); // Secure only
when '11' security_state_match = (HMC == '1' ||, IsSecureEL1()); // HMC=1 -> Both, 0 -> Secure only
if linked then
// "LBN" must be an enabled context-aware breakpoint unit. If it is not context-aware then
// it is CONSTRAINED UNPREDICTABLE whether this gives no match, or LBN is mapped to some
// UNKNOWN breakpoint that is context-aware.
lbn =} && ! UIntIsInHost(LBN);
first_ctx_cmp =() && CONTEXTIDR_EL1<31:0> == DBGBVR_EL1[n]<31:0>);
if match_vmid then
if ! GetNumBreakpointsHave16bitVMID() -() || VTCR_EL2.VS == '0' then
vmid = GetNumContextAwareBreakpointsZeroExtend();
last_ctx_cmp =(VTTBR_EL2.VMID<7:0>, 16);
bvr_vmid = GetNumBreakpointsZeroExtend() - 1;
if (lbn < first_ctx_cmp || lbn > last_ctx_cmp) then
(c, lbn) =(DBGBVR_EL1[n]<39:32>, 16);
else
vmid = VTTBR_EL2.VMID;
bvr_vmid = DBGBVR_EL1[n]<47:32>;
BXVR_match = (PSTATE.EL IN { ConstrainUnpredictableIntegerEL0(first_ctx_cmp, last_ctx_cmp,, Unpredictable_BPNOTCTXCMPEL1);
assert c IN {} &&Constraint_DISABLEDEL2Enabled,() &&
! Constraint_NONEIsInHost,() &&
vmid == bvr_vmid);
elsif match_cid2 then
BXVR_match = (( Constraint_UNKNOWNHaveVirtHostExt};
case c of
when() || Constraint_DISABLEDHaveV82Debug return FALSE; // Disabled
when()) && Constraint_NONEEL2Enabled linked = FALSE; // No linking
// Otherwise ConstrainUnpredictableInteger returned a context-aware breakpoint
if linked then
vaddress = bits(64) UNKNOWN;
linked_to = TRUE;
linked_match = AArch64.BreakpointValueMatch(lbn, vaddress, linked_to);
() &&
DBGBVR_EL1[n]<63:32> == CONTEXTIDR_EL2<31:0>);
return priv_match && security_state_match && (!linked || linked_match); bvr_match_valid = (match_addr || match_cid || match_cid1);
bxvr_match_valid = (match_vmid || match_cid2);
match = (!bxvr_match_valid || BXVR_match) && (!bvr_match_valid || BVR_match);
return match;
// AArch64.StateMatch()
// ====================
// Determine whether a breakpoint or watchpoint is enabled in the current mode and state.

boolean AArch64.StateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean linked, bits(4) LBN,
                           boolean isbreakpnt, AccType acctype, boolean ispriv)
    // "SSC", "HMC", "PxC" are the control fields from the DBGBCR[n] or DBGWCR[n] register.
    // "linked" is TRUE if this is a linked breakpoint/watchpoint type.
    // "LBN" is the linked breakpoint number from the DBGBCR[n] or DBGWCR[n] register.
    // "isbreakpnt" is TRUE for breakpoints, FALSE for watchpoints.
    // "ispriv" is valid for watchpoints, and selects between privileged and unprivileged accesses.

    // If parameters are set to a reserved type, behaves as either disabled or a defined type
    (c, SSC, HMC, PxC) = CheckValidStateMatch(SSC, HMC, PxC, isbreakpnt);
    if c == Constraint_DISABLED then return FALSE;
    // Otherwise the HMC,SSC,PxC values are either valid or the values returned by
    // CheckValidStateMatch are valid.

    EL3_match = HaveEL(EL3) && HMC == '1' && SSC<0> == '0';
    EL2_match = HaveEL(EL2) && ((HMC == '1' && (SSC:PxC != '1000')) || SSC == '11');
    EL1_match = PxC<0> == '1';
    EL0_match = PxC<1> == '1';

    if HaveNV2Ext() && acctype == AccType_NV2REGISTER && !isbreakpnt then
        priv_match = EL2_match;
    elsif !ispriv && !isbreakpnt then
        priv_match = EL0_match;
    else
        case PSTATE.EL of
            when EL3  priv_match = EL3_match;
            when EL2  priv_match = EL2_match;
            when EL1  priv_match = EL1_match;
            when EL0  priv_match = EL0_match;

    case SSC of
        when '00'  security_state_match = TRUE;                        // Both
        when '01'  security_state_match = !IsSecure();                 // Non-secure only
        when '10'  security_state_match = IsSecure();                  // Secure only
        when '11'  security_state_match = (HMC == '1' || IsSecure()); // HMC=1 -> Both, 0 -> Secure only

    if linked then
        // "LBN" must be an enabled context-aware breakpoint unit. If it is not context-aware then
        // it is CONSTRAINED UNPREDICTABLE whether this gives no match, or LBN is mapped to some
        // UNKNOWN breakpoint that is context-aware.
        lbn = UInt(LBN);
        first_ctx_cmp = (UInt(ID_AA64DFR0_EL1.BRPs) - UInt(ID_AA64DFR0_EL1.CTX_CMPs));
        last_ctx_cmp = UInt(ID_AA64DFR0_EL1.BRPs);
        if (lbn < first_ctx_cmp || lbn > last_ctx_cmp) then
            (c, lbn) = ConstrainUnpredictableInteger(first_ctx_cmp, last_ctx_cmp, Unpredictable_BPNOTCTXCMP);
            assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
            case c of
                when Constraint_DISABLED  return FALSE;      // Disabled
                when Constraint_NONE      linked = FALSE;    // No linking
                // Otherwise ConstrainUnpredictableInteger returned a context-aware breakpoint

    if linked then
        vaddress = bits(64) UNKNOWN;
        linked_to = TRUE;
        linked_match = AArch64.BreakpointValueMatch(lbn, vaddress, linked_to);

    return priv_match && security_state_match && (!linked || linked_match);
// AArch64.GenerateDebugExceptions()
// =================================
// Return TRUE if the current state can generate self-hosted debug exceptions.

boolean AArch64.GenerateDebugExceptions()
    return AArch64.GenerateDebugExceptionsFrom(PSTATE.EL, IsSecure(), PSTATE.D);
// AArch64.GenerateDebugExceptionsFrom()
// =====================================
// Return TRUE if the Exception level "from", in the given Security state and with the
// given debug mask bit, can generate self-hosted debug exceptions.

boolean AArch64.GenerateDebugExceptionsFrom(bits(2) from, boolean secure, bit mask)
    // No debug exceptions while the OS Lock is set, the OS Double Lock is set, or in Debug state.
    if OSLSR_EL1.OSLK == '1' || DoubleLockStatus() || Halted() then
        return FALSE;

    route_to_el2 = HaveEL(EL2) && (!secure || IsSecureEL2Enabled()) && (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1');
    target = (if route_to_el2 then EL2 else EL1);
    enabled = !HaveEL(EL3) || !secure || MDCR_EL3.SDD == '0';
    if from == target then
        // At the target Exception level, debug exceptions require the Kernel Debug Enable bit
        // and an unmasked PSTATE.D.
        enabled = enabled && MDSCR_EL1.KDE == '1' && mask == '0';
    else
        enabled = enabled && UInt(target) > UInt(from);
    return enabled;
// AArch64.CheckForPMUOverflow()
// =============================
// Signal Performance Monitors overflow IRQ and CTI overflow events

boolean AArch64.CheckForPMUOverflow()
    // Cycle counter (counter 31) overflow, if enabled and unmasked.
    pmuirq = PMCR_EL0.E == '1' && PMINTENSET_EL1<31> == '1' && PMOVSSET_EL0<31> == '1';
    for n = 0 to UInt(PMCR_EL0.N) - 1
        if HaveEL(EL2) then
            // Counters at or above MDCR_EL2.HPMN are enabled by MDCR_EL2.HPME instead of PMCR_EL0.E.
            E = (if n < UInt(MDCR_EL2.HPMN) then PMCR_EL0.E else MDCR_EL2.HPME);
        else
            E = PMCR_EL0.E;
        if E == '1' && PMINTENSET_EL1<n> == '1' && PMOVSSET_EL0<n> == '1' then pmuirq = TRUE;

    SetInterruptRequestLevel(InterruptID_PMUIRQ, if pmuirq then HIGH else LOW);

    CTI_SetEventLevel(CrossTriggerIn_PMUOverflow, if pmuirq then HIGH else LOW);

    // The request remains set until the condition is cleared. (For example, an interrupt handler
    // or cross-triggered event handler clears the overflow status flag by writing to PMOVSCLR_EL0.)
    return pmuirq;
// AArch64.CountEvents()
// =====================
// Return TRUE if counter "n" should count its event. For the cycle counter, n == 31.

boolean AArch64.CountEvents(integer n)
    assert n == 31 || n < UInt(PMCR_EL0.N);

    // Event counting is disabled in Debug state
    debug = Halted();

    // In Non-secure state, some counters are reserved for EL2
    if HaveEL(EL2) then
        resvd_for_el2 = n >= UInt(MDCR_EL2.HPMN) && n != 31;
    else
        resvd_for_el2 = FALSE;

    // Main enable controls
    E = if resvd_for_el2 then MDCR_EL2.HPME else PMCR_EL0.E;
    enabled = E == '1' && PMCNTENSET_EL0<n> == '1';

    // Event counting is allowed unless it is prohibited by any rule below
    prohibited = FALSE;

    // Event counting in Secure state is prohibited if all of:
    // * EL3 is implemented
    // * MDCR_EL3.SPME == 0, and either:
    //   - FEAT_PMUv3p7 is not implemented
    //   - MDCR_EL3.MPMX == 0
    if HaveEL(EL3) && IsSecure() then
        if HavePMUv3p7() then
            prohibited = MDCR_EL3.<SPME,MPMX> == '00';
        else
            prohibited = MDCR_EL3.SPME == '0';

    // Event counting at EL3 is prohibited if all of:
    // * FEAT_PMUv3p7 is implemented
    // * One of the following is true:
    //   - MDCR_EL3.SPME == 1
    //   - PMNx is not reserved for EL2
    // * MDCR_EL3.MPMX == 1
    if !prohibited && PSTATE.EL == EL3 && HavePMUv3p7() then
        prohibited = MDCR_EL3.MPMX == '1' && (MDCR_EL3.SPME == '1' || !resvd_for_el2);

    // Event counting at EL2 is prohibited if all of:
    // * The HPMD Extension is implemented
    // * PMNx is not reserved for EL2
    // * MDCR_EL2.HPMD == 1
    if !prohibited && PSTATE.EL == EL2 && HaveHPMDExt() && !resvd_for_el2 then
        prohibited = MDCR_EL2.HPMD == '1';

    // The IMPLEMENTATION DEFINED authentication interface might override software
    if prohibited && !HaveNoSecurePMUDisableOverride() then
        prohibited = !ExternalSecureNoninvasiveDebugEnabled();

    // PMCR_EL0.DP disables the cycle counter when event counting is prohibited
    if enabled && prohibited && n == 31 then
        enabled = PMCR_EL0.DP == '0';

    // If FEAT_PMUv3p5 is implemented, cycle counting can be prohibited.
    // This is not overridden by PMCR_EL0.DP.
    if Havev85PMU() && n == 31 then
        if HaveEL(EL3) && IsSecure() && MDCR_EL3.SCCD == '1' then
            prohibited = TRUE;
        if PSTATE.EL == EL2 && MDCR_EL2.HCCD == '1' then
            prohibited = TRUE;

    // If FEAT_PMUv3p7 is implemented, cycle counting can be prohibited at EL3.
    // This is not overridden by PMCR_EL0.DP.
    if HavePMUv3p7() && n == 31 then
        if PSTATE.EL == EL3 && MDCR_EL3.MCCD == '1' then
            prohibited = TRUE;

    // Event counting might be frozen
    frozen = FALSE;

    // If FEAT_PMUv3p7 is implemented, event counting can be frozen
    if HavePMUv3p7() && n != 31 then
        ovflw = PMOVSCLR_EL0<UInt(PMCR_EL0.N)-1:0>;
        if resvd_for_el2 then
            FZ = MDCR_EL2.HPMFZO;
            ovflw<UInt(MDCR_EL2.HPMN)-1:0> = Zeros();
        else
            FZ = PMCR_EL0.FZO;
            if HaveEL(EL2) then
                ovflw<UInt(PMCR_EL0.N)-1:UInt(MDCR_EL2.HPMN)> = Zeros();
        frozen = FZ == '1' && !IsZero(ovflw);

    // Event counting can be filtered by the {P, U, NSK, NSU, NSH, M, SH} bits
    filter = if n == 31 then PMCCFILTR_EL0[31:0] else PMEVTYPER_EL0[n]<31:0>;

    P   = filter<31>;
    U   = filter<30>;
    NSK = if HaveEL(EL3) then filter<29> else '0';
    NSU = if HaveEL(EL3) then filter<28> else '0';
    NSH = if HaveEL(EL2) then filter<27> else '0';
    M   = if HaveEL(EL3) then filter<26> else '0';
    SH  = if HaveEL(EL3) && HaveSecureEL2Ext() then filter<24> else '0';

    case PSTATE.EL of
        when EL0  filtered = if IsSecure() then U == '1' else U != NSU;
        when EL1  filtered = if IsSecure() then P == '1' else P != NSK;
        when EL2  filtered = if IsSecure() then NSH == SH else NSH == '0';
        when EL3  filtered = M != P;

    return !debug && enabled && !prohibited && !filtered && !frozen;
// CheckProfilingBufferAccess()
// ============================
// Check for traps on accesses to the Statistical Profiling buffer control registers.

SysRegAccess CheckProfilingBufferAccess()
    if !HaveStatisticalProfiling() || PSTATE.EL == EL0 || UsingAArch32() then
        return SysRegAccess_UNDEFINED;

    if PSTATE.EL == EL1 && EL2Enabled() && MDCR_EL2.E2PB<0> != '1' then
        return SysRegAccess_TrapToEL2;

    if HaveEL(EL3) && PSTATE.EL != EL3 && MDCR_EL3.NSPB != SCR_EL3.NS:'1' then
        return SysRegAccess_TrapToEL3;

    return SysRegAccess_OK;
// CheckStatisticalProfilingAccess()
// =================================
// Check for traps on accesses to the Statistical Profiling control registers.

SysRegAccess CheckStatisticalProfilingAccess()
    if !HaveStatisticalProfiling() || PSTATE.EL == EL0 || UsingAArch32() then
        return SysRegAccess_UNDEFINED;

    if PSTATE.EL == EL1 && EL2Enabled() && MDCR_EL2.TPMS == '1' then
        return SysRegAccess_TrapToEL2;

    if HaveEL(EL3) && PSTATE.EL != EL3 && MDCR_EL3.NSPB != SCR_EL3.NS:'1' then
        return SysRegAccess_TrapToEL3;

    return SysRegAccess_OK;
// CollectContextIDR1()
// ====================
// Return TRUE if the profiling sample should include the CONTEXTIDR_EL1 value.

boolean CollectContextIDR1()
    if !StatisticalProfilingEnabled() then return FALSE;
    if PSTATE.EL == EL2 then return FALSE;
    if EL2Enabled() && HCR_EL2.TGE == '1' then return FALSE;
    return PMSCR_EL1.CX == '1';
// CollectContextIDR2()
// ====================
// Return TRUE if the profiling sample should include the CONTEXTIDR_EL2 value.

boolean CollectContextIDR2()
    if !StatisticalProfilingEnabled() then return FALSE;
    if !EL2Enabled() then return FALSE;
    return PMSCR_EL2.CX == '1';
// CollectPhysicalAddress()
// ========================
// Return TRUE if the profiling sample should include the physical address.

boolean CollectPhysicalAddress()
    if !StatisticalProfilingEnabled() then return FALSE;
    (secure, el) = ProfilingBufferOwner();
    if ((!secure && HaveEL(EL2)) || IsSecureEL2Enabled()) then
        // When EL2 is in use, PA collection is gated by PMSCR_EL2.PA, and additionally by
        // PMSCR_EL1.PA when the buffer owning Exception level is EL1.
        return PMSCR_EL2.PA == '1' && (el == EL2 || PMSCR_EL1.PA == '1');
    else
        return PMSCR_EL1.PA == '1';
// CollectTimeStamp()
// ==================
// Return the type of timestamp to be collected with the profiling sample,
// or TimeStamp_None if timestamping is disabled.

TimeStamp CollectTimeStamp()
    if !StatisticalProfilingEnabled() then return TimeStamp_None;
    (secure, el) = ProfilingBufferOwner();
    // Timestamping is enabled by the buffer owning Exception level's PMSCR TS bit.
    if el == EL2 then
        if PMSCR_EL2.TS == '0' then return TimeStamp_None;
    else
        if PMSCR_EL1.TS == '0' then return TimeStamp_None;
    if EL2Enabled() then
        case PMSCR_EL2.PCT of
            when '00'
                return TimeStamp_Virtual;
            when '01'
                // EL2 selects the physical counter; an EL1 owner's PCT choice is still consulted below.
                if el == EL2 then return TimeStamp_Physical;
            when '11'
                if (el == EL2 || PMSCR_EL1.PCT != '00') && HaveECVExt() then
                    return TimeStamp_OffsetPhysical;
            otherwise
                Unreachable();
    case PMSCR_EL1.PCT of
        when '00' return TimeStamp_Virtual;
        when '01' return TimeStamp_Physical;
        when '11' if HaveECVExt() then return TimeStamp_OffsetPhysical;
        otherwise Unreachable();
// OpType
// ======
// Classes of operation sampled by the Statistical Profiling Extension.

enumeration OpType {
    OpType_Load,        // Any memory-read operation other than atomics, compare-and-swap, and swap
    OpType_Store,       // Any memory-write operation, including atomics without return
    OpType_LoadAtomic,  // Atomics with return, compare-and-swap and swap
    OpType_Branch,      // Software write to the PC
    OpType_Other        // Any other class of operation
};
// ProfilingBufferEnabled()
// ========================
// Return TRUE if the Profiling Buffer is enabled in the current Security state.

boolean ProfilingBufferEnabled()
    if !HaveStatisticalProfiling() then return FALSE;
    (secure, el) = ProfilingBufferOwner();
    non_secure_bit = if secure then '0' else '1';
    return (!ELUsingAArch32(el) && non_secure_bit == SCR_EL3.NS &&
            PMBLIMITR_EL1.E == '1' && PMBSR_EL1.S == '0');
// ProfilingBufferOwner()
// ======================
// Return the Security state and Exception level that owns the Profiling Buffer.

(boolean, bits(2)) ProfilingBufferOwner()
    secure = if HaveEL(EL3) then (MDCR_EL3.NSPB<1> == '0') else IsSecure();
    el = if !secure && HaveEL(EL2) && MDCR_EL2.E2PB == '00' then EL2 else EL1;
    return (secure, el);
// Barrier to ensure that all existing profiling data has been formatted, and profiling buffer
// addresses have been translated such that writes to the profiling buffer have been initiated.
// A following DSB completes when writes to the profiling buffer have completed.
ProfilingSynchronizationBarrier();
// SPECollectRecord()
// ==================
// Returns TRUE if the sampled class of instructions or operations, as
// determined by PMSFCR_EL1, are recorded and FALSE otherwise.

boolean SPECollectRecord(bits(64) events, integer total_latency, OpType optype)
    assert StatisticalProfilingEnabled();
    bits(64) mask = 0xAA<63:0>;                              // Bits [7,5,3,1]
    if HaveSVE() then mask<18:17> = Ones();                  // Predicate flags
    if HaveStatisticalProfilingv1p1() then mask<11> = '1';   // Alignment Flag
    if HaveStatisticalProfilingv1p2() then mask<6> = '1';    // Not taken flag
    mask<63:48> = bits(16) IMPLEMENTATION_DEFINED;
    mask<31:24> = bits(8) IMPLEMENTATION_DEFINED;
    mask<15:12> = bits(4) IMPLEMENTATION_DEFINED;
    // Check for UNPREDICTABLE case
    if (HaveStatisticalProfilingv1p2() && PMSFCR_EL1.<FnE,FE> == '11' &&
            !IsZero(PMSEVFR_EL1 AND PMSNEVFR_EL1 AND mask)) then
        if ConstrainUnpredictableBool(Unpredictable_BADPMSFCR) then
            return FALSE;
    else
        // Filtering by event
        if PMSFCR_EL1.FE == '1' && !IsZero(PMSEVFR_EL1) then
            e = events AND mask;
            m = PMSEVFR_EL1 AND mask;
            if !IsZero(NOT(e) AND m) then return FALSE;
        // Filtering by inverse event
        if (HaveStatisticalProfilingv1p2() && PMSFCR_EL1.FnE == '1' &&
                !IsZero(PMSNEVFR_EL1)) then
            e = events AND mask;
            m = PMSNEVFR_EL1 AND mask;
            if !IsZero(e AND m) then return FALSE;
    // Filtering by type
    if PMSFCR_EL1.FT == '1' && !IsZero(PMSFCR_EL1.<B,LD,ST>) then
        case optype of
            when OpType_Branch
                if PMSFCR_EL1.B == '0' then return FALSE;
            when OpType_Load
                if PMSFCR_EL1.LD == '0' then return FALSE;
            when OpType_Store
                if PMSFCR_EL1.ST == '0' then return FALSE;
            when OpType_LoadAtomic
                if PMSFCR_EL1.<LD,ST> == '00' then return FALSE;
            otherwise
                return FALSE;
    // Filtering by latency
    if PMSFCR_EL1.FL == '1' && !IsZero(PMSLATFR_EL1.MINLAT) then
        if total_latency < UInt(PMSLATFR_EL1.MINLAT) then
            return FALSE;
    // Check for UNPREDICTABLE cases
    if ((PMSFCR_EL1.FE == '1' && IsZero(PMSEVFR_EL1 AND mask)) ||
            (PMSFCR_EL1.FT == '1' && IsZero(PMSFCR_EL1.<B,LD,ST>)) ||
            (PMSFCR_EL1.FL == '1' && IsZero(PMSLATFR_EL1.MINLAT))) then
        return ConstrainUnpredictableBool(Unpredictable_BADPMSFCR);
    if (HaveStatisticalProfilingv1p2() &&
            ((PMSFCR_EL1.FnE == '1' && IsZero(PMSNEVFR_EL1 AND mask)) ||
             (PMSFCR_EL1.<FnE,FE> == '11' &&
              !IsZero(PMSEVFR_EL1 AND PMSNEVFR_EL1 AND mask)))) then
        return ConstrainUnpredictableBool(Unpredictable_BADPMSFCR);
    return TRUE;
// StatisticalProfilingEnabled()
// =============================
// Return TRUE if the Statistical Profiling Extension is enabled for sampling at
// the current Exception level and Security state.

boolean StatisticalProfilingEnabled()
    if !HaveStatisticalProfiling() || UsingAArch32() || !ProfilingBufferEnabled() then
        return FALSE;
    in_host = EL2Enabled() && HCR_EL2.TGE == '1';
    (secure, el) = ProfilingBufferOwner();
    // Sampling is disabled above the owning Exception level, across Security states,
    // and for an EL1 owner while in the host (HCR_EL2.TGE == 1) regime.
    if UInt(el) < UInt(PSTATE.EL) || secure != IsSecure() || (in_host && el == EL1) then
        return FALSE;
    case PSTATE.EL of
        when EL3  Unreachable();
        when EL2  spe_bit = PMSCR_EL2.E2SPE;
        when EL1  spe_bit = PMSCR_EL1.E1SPE;
        when EL0  spe_bit = (if in_host then PMSCR_EL2.E0HSPE else PMSCR_EL1.E0SPE);
    return spe_bit == '1';
// SysRegAccess
// ============
// Result of a System register access permission check.

enumeration SysRegAccess { SysRegAccess_OK,
                           SysRegAccess_UNDEFINED,
                           SysRegAccess_TrapToEL1,
                           SysRegAccess_TrapToEL2,
                           SysRegAccess_TrapToEL3 };

// TimeStamp
// =========
// Timestamp type collected with a Statistical Profiling sample.

enumeration TimeStamp {
    TimeStamp_None,            // No timestamp
    TimeStamp_CoreSight,       // CoreSight time (IMPLEMENTATION DEFINED)
    TimeStamp_Physical,        // Physical counter value with no offset
    TimeStamp_OffsetPhysical,  // Physical counter value minus CNTPOFF_EL2
    TimeStamp_Virtual };       // Physical counter value minus CNTVOFF_EL2
// AArch64.TakeExceptionInDebugState()
// ===================================
// Take an exception in Debug state to an Exception Level using AArch64.

AArch64.TakeExceptionInDebugState(bits(2) target_el, ExceptionRecord exception)
    assert HaveEL(target_el) && !ELUsingAArch32(target_el) && UInt(target_el) >= UInt(PSTATE.EL);

    sync_errors = HaveIESB() && SCTLR[target_el].IESB == '1';
    if HaveDoubleFaultExt() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && target_el == EL3);
    // SCTLR[].IESB might be ignored in Debug state.
    if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then
        sync_errors = FALSE;

    SynchronizeContext();

    // If coming from AArch32 state, the top parts of the X[] registers might be set to zero
    from_32 = UsingAArch32();
    if from_32 then AArch64.MaybeZeroRegisterUppers();
    MaybeZeroSVEUppers(target_el);

    AArch64.ReportException(exception, target_el);

    PSTATE.EL = target_el;
    PSTATE.nRW = '0';
    PSTATE.SP = '1';

    SPSR[] = bits(64) UNKNOWN;
    ELR[] = bits(64) UNKNOWN;

    // PSTATE.{SS,D,A,I,F} are not observable and ignored in Debug state, so behave as if UNKNOWN.
    PSTATE.<SS,D,A,I,F> = bits(5) UNKNOWN;
    PSTATE.IL = '0';
    if from_32 then                        // Coming from AArch32
        PSTATE.IT = '00000000';
        PSTATE.T = '0';                    // PSTATE.J is RES0
    if (HavePANExt() && (PSTATE.EL == EL1 || (PSTATE.EL == EL2 && ELIsInHost(EL0))) &&
            SCTLR[].SPAN == '0') then
        PSTATE.PAN = '1';
    if HaveUAOExt() then PSTATE.UAO = '0';
    if HaveBTIExt() then PSTATE.BTYPE = '00';
    if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    if HaveMTEExt() then PSTATE.TCO = '1';

    DLR_EL0 = bits(64) UNKNOWN;
    DSPSR_EL0 = bits(64) UNKNOWN;

    EDSCR.ERR = '1';
    UpdateEDSCRFields();                   // Update EDSCR processor state flags.

    if sync_errors then
        SynchronizeErrors();

    EndOfInstruction();
// AArch64.WatchpointByteMatch()
// =============================
boolean// AArch64.TakeExceptionInDebugState()
// ===================================
// Take an exception in Debug state to an Exception Level using AArch64. AArch64.WatchpointByteMatch(integer n,AArch64.TakeExceptionInDebugState(bits(2) target_el, AccTypeExceptionRecord acctype, bits(64) vaddress)
integer top = ifexception)
assert Have52BitVAExtHaveEL() then 52 else 48;
bottom = if DBGWVR_EL1[n]<2> == '1' then 2 else 3; // Word or doubleword
byte_select_match = (DBGWCR_EL1[n].BAS<(target_el) && !ELUsingAArch32(target_el) && UInt(vaddress<bottom-1:0>)> != '0');
mask =(target_el) >= UInt(DBGWCR_EL1[n].MASK);
(PSTATE.EL);
// If DBGWCR_EL1[n].MASK is non-zero value and DBGWCR_EL1[n].BAS is not set to '11111111', or
// DBGWCR_EL1[n].BAS specifies a non-contiguous set of bytes behavior is CONSTRAINED
// UNPREDICTABLE.
if mask > 0 && ! sync_errors =IsOnesHaveIESB(DBGWCR_EL1[n].BAS) then
byte_select_match =() && ConstrainUnpredictableBoolSCTLR([target_el].IESB == '1';
ifUnpredictable_WPMASKANDBASHaveDoubleFaultExt);
else
LSB = (DBGWCR_EL1[n].BAS AND NOT(DBGWCR_EL1[n].BAS - 1)); MSB = (DBGWCR_EL1[n].BAS + LSB);
if !() then
sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && target_el ==IsZeroEL3(MSB AND (MSB - 1)) then // Not contiguous
byte_select_match =);
// SCTLR[].IESB might be ignored in Debug state.
if ! ConstrainUnpredictableBool(Unpredictable_WPBASCONTIGUOUSUnpredictable_IESBinDebug);
bottom = 3; // For the whole doubleword
// If the address mask is set to a reserved value, the behavior is CONSTRAINED UNPREDICTABLE.
if mask > 0 && mask <= 2 then
(c, mask) =) then
sync_errors = FALSE; ConstrainUnpredictableIntegerSynchronizeContext(3, 31,();
// If coming from AArch32 state, the top parts of the X[] registers might be set to zero
from_32 = Unpredictable_RESWPMASKUsingAArch32);
assert c IN {();
if from_32 thenConstraint_DISABLEDAArch64.MaybeZeroRegisterUppers,(); Constraint_NONEMaybeZeroSVEUppers,(target_el); Constraint_UNKNOWNAArch64.ReportException};
case c of
when(exception, target_el);
PSTATE.EL = target_el;
PSTATE.nRW = '0';
PSTATE.SP = '1'; Constraint_DISABLEDSPSR return FALSE; // Disabled
when[] = bits(64) UNKNOWN; Constraint_NONEELR mask = 0; // No masking
// Otherwise the value returned by ConstrainUnpredictableInteger is a not-reserved value
[] = bits(64) UNKNOWN;
if mask > bottom then
// If the DBGxVR<n>_EL1.RESS field bits are not a sign extension of the MSB
// of DBGBVR<n>_EL1.VA, it is UNPREDICTABLE whether they appear to be
// included in the match.
if ! // PSTATE.{SS,D,A,I,F} are not observable and ignored in Debug state, so behave as if UNKNOWN.
PSTATE.<SS,D,A,I,F> = bits(5) UNKNOWN;
PSTATE.IL = '0';
if from_32 then // Coming from AArch32
PSTATE.IT = '00000000';
PSTATE.T = '0'; // PSTATE.J is RES0
if (IsOnesHavePANExt(DBGBVR_EL1[n]<63:top>) && !() && (PSTATE.EL ==IsZeroEL1(DBGBVR_EL1[n]<63:top>) then
if|| (PSTATE.EL == ConstrainUnpredictableBoolEL2(&&Unpredictable_DBGxVR_RESSELIsInHost) then
top = 63;
WVR_match = (vaddress<top:mask> == DBGWVR_EL1[n]<top:mask>);
// If masked bits of DBGWVR_EL1[n] are not zero, the behavior is CONSTRAINED UNPREDICTABLE.
if WVR_match && !(IsZeroEL0(DBGWVR_EL1[n]<mask-1:bottom>) then
WVR_match =))) && ConstrainUnpredictableBoolSCTLR([].SPAN == '0') then
PSTATE.PAN = '1';
if() then PSTATE.UAO = '0';
if HaveBTIExt() then PSTATE.BTYPE = '00';
if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
if HaveMTEExt() then PSTATE.TCO = '1';
DLR_EL0 = bits(64) UNKNOWN;
DSPSR_EL0 = bits(64) UNKNOWN;
EDSCR.ERR = '1';
UpdateEDSCRFields(); // Update EDSCR processor state flags.
if sync_errors then
SynchronizeErrors();
EndOfInstructionUnpredictable_WPMASKEDBITSHaveUAOExt);
else
WVR_match = vaddress<top:bottom> == DBGWVR_EL1[n]<top:bottom>;
return WVR_match && byte_select_match;();
// AArch64.WatchpointMatch()
// =========================
// Watchpoint matching in an AArch64 translation regime.
// AArch64.WatchpointByteMatch()
// =============================
boolean AArch64.WatchpointMatch(integer n, bits(64) vaddress, integer size, boolean ispriv,AArch64.WatchpointByteMatch(integer n,
AccType acctype, boolean iswrite)
assert !acctype, bits(64) vaddress)
integer top = ifELUsingAArch32Have52BitVAExt(() then 52 else 48;
bottom = if DBGWVR_EL1[n]<2> == '1' then 2 else 3; // Word or doubleword
byte_select_match = (DBGWCR_EL1[n].BAS<S1TranslationRegimeUInt());
assert n <(vaddress<bottom-1:0>)> != '0');
mask = GetNumWatchpointsUInt();
(DBGWCR_EL1[n].MASK);
// "ispriv" is:
// * FALSE for all loads, stores, and atomic operations executed at EL0.
// * FALSE if the access is unprivileged.
// * TRUE for all other loads, stores, and atomic operations.
enabled = DBGWCR_EL1[n].E == '1';
linked = DBGWCR_EL1[n].WT == '1';
isbreakpnt = FALSE;
state_match = // If DBGWCR_EL1[n].MASK is non-zero value and DBGWCR_EL1[n].BAS is not set to '11111111', or
// DBGWCR_EL1[n].BAS specifies a non-contiguous set of bytes behavior is CONSTRAINED
// UNPREDICTABLE.
if mask > 0 && ! AArch64.StateMatchIsOnes(DBGWCR_EL1[n].SSC, DBGWCR_EL1[n].HMC, DBGWCR_EL1[n].PAC,
linked, DBGWCR_EL1[n].LBN, isbreakpnt, acctype, ispriv);
ls_match = FALSE;
if acctype ==(DBGWCR_EL1[n].BAS) then
byte_select_match = AccType_ATOMICRWConstrainUnpredictableBool then
ls_match = (DBGWCR_EL1[n].LSC != '00');
else
ls_match = (DBGWCR_EL1[n].LSC<(if iswrite then 1 else 0)> == '1');
value_match = FALSE;
for byte = 0 to size - 1
value_match = value_match ||( );
else
LSB = (DBGWCR_EL1[n].BAS AND NOT(DBGWCR_EL1[n].BAS - 1)); MSB = (DBGWCR_EL1[n].BAS + LSB);
if !IsZero(MSB AND (MSB - 1)) then // Not contiguous
byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPBASCONTIGUOUS);
bottom = 3; // For the whole doubleword
// If the address mask is set to a reserved value, the behavior is CONSTRAINED UNPREDICTABLE.
if mask > 0 && mask <= 2 then
(c, mask) = ConstrainUnpredictableInteger(3, 31, Unpredictable_RESWPMASK);
assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
case c of
when Constraint_DISABLED return FALSE; // Disabled
when Constraint_NONE mask = 0; // No masking
// Otherwise the value returned by ConstrainUnpredictableInteger is a not-reserved value
if mask > bottom then
// If the DBGxVR<n>_EL1.RESS field bits are not a sign extension of the MSB
// of DBGBVR<n>_EL1.VA, it is UNPREDICTABLE whether they appear to be
// included in the match.
if !IsOnes(DBGBVR_EL1[n]<63:top>) && !IsZero(DBGBVR_EL1[n]<63:top>) then
if ConstrainUnpredictableBool(Unpredicatable_DBGxVR_RESS) then
top = 63;
WVR_match = (vaddress<top:mask> == DBGWVR_EL1[n]<top:mask>);
// If masked bits of DBGWVR_EL1[n] are not zero, the behavior is CONSTRAINED UNPREDICTABLE.
if WVR_match && !IsZero(DBGWVR_EL1[n]<mask-1:bottom>) then
WVR_match = ConstrainUnpredictableBool(Unpredictable_WPMASKEDBITSAArch64.WatchpointByteMatchUnpredictable_WPMASKANDBAS(n, acctype, vaddress + byte);
);
else
WVR_match = vaddress<top:bottom> == DBGWVR_EL1[n]<top:bottom>;
return value_match && state_match && ls_match && enabled; return WVR_match && byte_select_match;
// AArch64.Abort()
// ===============
// Abort and Debug exception handling in an AArch64 translation regime.// AArch64.WatchpointMatch()
// =========================
// Watchpoint matching in an AArch64 translation regime.
boolean
AArch64.Abort(bits(64) vaddress,AArch64.WatchpointMatch(integer n, bits(64) vaddress, integer size, boolean ispriv, FaultRecordAccType fault)
ifacctype, boolean iswrite)
assert ! IsDebugExceptionELUsingAArch32(fault) then
if fault.acctype ==( AccType_IFETCHS1TranslationRegime then
if());
assert n <= UsingAArch32UInt() && fault.debugmoe ==(ID_AA64DFR0_EL1.WRPs);
// "ispriv" is:
// * FALSE for all loads, stores, and atomic operations executed at EL0.
// * FALSE if the access is unprivileged.
// * TRUE for all other loads, stores, and atomic operations.
enabled = DBGWCR_EL1[n].E == '1';
linked = DBGWCR_EL1[n].WT == '1';
isbreakpnt = FALSE;
state_match = DebugException_VectorCatchAArch64.StateMatch then(DBGWCR_EL1[n].SSC, DBGWCR_EL1[n].HMC, DBGWCR_EL1[n].PAC,
linked, DBGWCR_EL1[n].LBN, isbreakpnt, acctype, ispriv);
ls_match = FALSE;
if acctype ==
AArch64.VectorCatchExceptionAccType_ATOMICRW(fault);
elsethen
ls_match = (DBGWCR_EL1[n].LSC != '00');
else
ls_match = (DBGWCR_EL1[n].LSC<(if iswrite then 1 else 0)> == '1');
value_match = FALSE;
for byte = 0 to size - 1
value_match = value_match ||
AArch64.BreakpointExceptionAArch64.WatchpointByteMatch(fault);
else
AArch64.WatchpointException(vaddress, fault);
elsif fault.acctype == AccType_IFETCH then
AArch64.InstructionAbort(vaddress, fault);
else
AArch64.DataAbort(vaddress, fault);(n, acctype, vaddress + byte);
return value_match && state_match && ls_match && enabled;
// AArch64.AbortSyndrome()
// =======================
// Creates an exception syndrome record for Abort and Watchpoint exceptions
// from an AArch64 translation regime.
ExceptionRecord// AArch64.Abort()
// ===============
// Abort and Debug exception handling in an AArch64 translation regime. AArch64.AbortSyndrome(AArch64.Abort(bits(64) vaddress,Exception exceptype, FaultRecord fault, bits(64) vaddress)
exception =fault)
if ExceptionSyndromeIsDebugException(exceptype);
d_side = exceptype IN {(fault) then
if fault.acctype ==Exception_DataAbortAccType_IFETCH,then
if Exception_NV2DataAbortUsingAArch32,() && fault.debugmoe == Exception_WatchpointDebugException_VectorCatch,then Exception_NV2WatchpointAArch64.VectorCatchException};
(exception.syndrome, exception.syndrome2) =(fault);
else AArch64.FaultSyndromeAArch64.BreakpointException(d_side, fault);
exception.vaddress =(fault);
else ZeroExtendAArch64.WatchpointException(vaddress);
if(vaddress, fault);
elsif fault.acctype == then
AArch64.InstructionAbort(vaddress, fault);
else
AArch64.DataAbortIPAValidAccType_IFETCH(fault) then
exception.ipavalid = TRUE;
exception.NS = fault.ipaddress.NS;
exception.ipaddress = fault.ipaddress.address;
else
exception.ipavalid = FALSE;
return exception;(vaddress, fault);
// AArch64.CheckPCAlignment()
// ==========================// AArch64.AbortSyndrome()
// =======================
// Creates an exception syndrome record for Abort and Watchpoint exceptions
// from an AArch64 translation regime.
ExceptionRecord
AArch64.CheckPCAlignment()
bits(64) pc =AArch64.AbortSyndrome( ThisInstrAddrException();
if pc<1:0> != '00' thenexceptype,
fault, bits(64) vaddress)
exception = ExceptionSyndrome(exceptype);
d_side = exceptype IN {Exception_DataAbort, Exception_NV2DataAbort, Exception_Watchpoint, Exception_NV2Watchpoint};
(exception.syndrome, exception.syndrome2) = AArch64.FaultSyndrome(d_side, fault);
exception.vaddress = ZeroExtend(vaddress);
if IPAValidAArch64.PCAlignmentFaultFaultRecord();(fault) then
exception.ipavalid = TRUE;
exception.NS = fault.ipaddress.NS;
exception.ipaddress = fault.ipaddress.address;
else
exception.ipavalid = FALSE;
return exception;
// AArch64.DataAbort()
// ===================// AArch64.CheckPCAlignment()
// ==========================
AArch64.DataAbort(bits(64) vaddress,AArch64.CheckPCAlignment()
bits(64) pc = FaultRecord fault)
route_to_el3 = HaveEL(EL3) && SCR_EL3.EA == '1' && IsExternalAbort(fault);
route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() && (HCR_EL2.TGE == '1' ||
(HaveRASExt() && HCR_EL2.TEA == '1' && IsExternalAbort(fault)) ||
(HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER) ||
IsSecondStage(fault)));
bits(64) preferred_exception_return = ThisInstrAddr();
if ( if pc<1:0> != '00' thenHaveDoubleFaultExtAArch64.PCAlignmentFault() && (PSTATE.EL == EL3 || route_to_el3) &&
IsExternalAbort(fault) && SCR_EL3.EASE == '1') then
vect_offset = 0x180;
else
vect_offset = 0x0;
if HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER then
exception = AArch64.AbortSyndrome(Exception_NV2DataAbort, fault, vaddress);
else
exception = AArch64.AbortSyndrome(Exception_DataAbort, fault, vaddress);
if PSTATE.EL == EL3 || route_to_el3 then
AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
elsif PSTATE.EL == EL2 || route_to_el2 then
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);();
// AArch64.EffectiveTCF()
// ======================
// Returns the TCF field applied to tag check faults in the given Exception Level.
bits(2)// AArch64.DataAbort()
// =================== AArch64.EffectiveTCF(bits(2) el)
bits(2) tcf;
if el ==AArch64.DataAbort(bits(64) vaddress, FaultRecord fault)
route_to_el3 = HaveEL(EL3 then
tcf = SCTLR_EL3.TCF;
elsif el ==) && SCR_EL3.EA == '1' && EL2IsExternalAbort then
tcf = SCTLR_EL2.TCF;
elsif el ==(fault);
route_to_el2 = (PSTATE.EL IN { EL0, EL1 then
tcf = SCTLR_EL1.TCF;
elsif el ==} && EL0EL2Enabled && HCR_EL2.<E2H,TGE> == '11' then
tcf = SCTLR_EL2.TCF0;
elsif el ==() && (HCR_EL2.TGE == '1' ||
( () && HCR_EL2.TEA == '1' && IsExternalAbort(fault)) ||
(HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER) ||
IsSecondStage(fault)));
bits(64) preferred_exception_return = ThisInstrAddr();
if (HaveDoubleFaultExt() && (PSTATE.EL == EL3 || route_to_el3) &&
IsExternalAbort(fault) && SCR_EL3.EASE == '1') then
vect_offset = 0x180;
else
vect_offset = 0x0;
if HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER then
exception = AArch64.AbortSyndrome(Exception_NV2DataAbort, fault, vaddress);
else
exception = AArch64.AbortSyndrome(Exception_DataAbort, fault, vaddress);
if PSTATE.EL == EL3 || route_to_el3 then
AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
elsif PSTATE.EL == EL2 || route_to_el2 then
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1EL0HaveRASExt && HCR_EL2.<E2H,TGE> != '11' then
tcf = SCTLR_EL1.TCF0;
return tcf;, exception, preferred_exception_return, vect_offset);
// AArch64.InstructionAbort()
// ==========================// AArch64.EffectiveTCF()
// ======================
// Returns the TCF field applied to tag check faults in the given Exception Level.
bits(2)
AArch64.InstructionAbort(bits(64) vaddress,AArch64.EffectiveTCF(bits(2) el)
bits(2) tcf;
if el == FaultRecord fault)
// External aborts on instruction fetch must be taken synchronously
if HaveDoubleFaultExt() then assert fault.statuscode != Fault_AsyncExternal;
route_to_el3 = HaveEL(EL3) && SCR_EL3.EA == '1' &&then
tcf = SCTLR_EL3.TCF;
elsif el == IsExternalAbortEL2(fault);
route_to_el2 = (PSTATE.EL IN {then
tcf = SCTLR_EL2.TCF;
elsif el ==EL0, EL1} &&then
tcf = SCTLR_EL1.TCF;
elsif el == EL2EnabledEL0() &&
(HCR_EL2.TGE == '1' ||&& HCR_EL2.<E2H,TGE> == '11' then
tcf = SCTLR_EL2.TCF0;
elsif el == IsSecondStageEL0(fault) ||
(HaveRASExt() && HCR_EL2.TEA == '1' && IsExternalAbort(fault))));
bits(64) preferred_exception_return = ThisInstrAddr();
if (HaveDoubleFaultExt() && (PSTATE.EL == EL3 || route_to_el3) &&
IsExternalAbort(fault) && SCR_EL3.EASE == '1') then
vect_offset = 0x180;
else
vect_offset = 0x0;
exception = AArch64.AbortSyndrome(Exception_InstructionAbort, fault, vaddress);
if PSTATE.EL == EL3 || route_to_el3 then
AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
elsif PSTATE.EL == EL2 || route_to_el2 then
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);&& HCR_EL2.<E2H,TGE> != '11' then
tcf = SCTLR_EL1.TCF0;
return tcf;
// AArch64.PCAlignmentFault()
// ==========================
// Called on unaligned program counter in AArch64 state.// AArch64.InstructionAbort()
// ==========================
AArch64.PCAlignmentFault()
bits(64) preferred_exception_return =AArch64.InstructionAbort(bits(64) vaddress, ThisInstrAddrFaultRecord();
vect_offset = 0x0;
exception =fault)
// External aborts on instruction fetch must be taken synchronously
if ExceptionSyndromeHaveDoubleFaultExt(() then assert fault.statuscode !=Exception_PCAlignmentFault_AsyncExternal);
exception.vaddress =;
route_to_el3 = ThisInstrAddrHaveEL();
if( UIntEL3(PSTATE.EL) >) && SCR_EL3.EA == '1' && UIntIsExternalAbort((fault);
route_to_el2 = (PSTATE.EL IN {EL0, EL1) then} &&
EL2Enabled() &&
(HCR_EL2.TGE == '1' || IsSecondStage(fault) ||
(HaveRASExt() && HCR_EL2.TEA == '1' && IsExternalAbort(fault))));
bits(64) preferred_exception_return = ThisInstrAddr();
if (HaveDoubleFaultExt() && (PSTATE.EL == EL3 || route_to_el3) &&
IsExternalAbort(fault) && SCR_EL3.EASE == '1') then
vect_offset = 0x180;
else
vect_offset = 0x0;
exception = AArch64.AbortSyndrome(Exception_InstructionAbort, fault, vaddress);
if PSTATE.EL == EL3 || route_to_el3 then
AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif( , exception, preferred_exception_return, vect_offset);
elsif PSTATE.EL == EL2EL2EnabledEL3() && HCR_EL2.TGE == '1' then|| route_to_el2 then
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.RaiseTagCheckFault()
// ============================
// Raise a tag check fault exception.// AArch64.PCAlignmentFault()
// ==========================
// Called on unaligned program counter in AArch64 state.
AArch64.RaiseTagCheckFault(bits(64) va, boolean write)
bits(2) target_el;
AArch64.PCAlignmentFault()
bits(64) preferred_exception_return = ThisInstrAddr();
integer vect_offset = 0x0;
vect_offset = 0x0;
if PSTATE.EL == exception = EL0ExceptionSyndrome then
target_el = if HCR_EL2.TGE == '0' then( Exception_PCAlignment);
exception.vaddress = ThisInstrAddr();
if UInt(PSTATE.EL) > UInt(EL1 else) then EL2AArch64.TakeException;
else
target_el = PSTATE.EL;
exception =(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif ExceptionSyndromeEL2Enabled(() && HCR_EL2.TGE == '1' thenException_DataAbortAArch64.TakeException);
exception.syndrome<5:0> = '010001';
if write then
exception.syndrome<6> = '1';
exception.vaddress = bits(4) UNKNOWN : va<59:0>;(
EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1(target_el, exception, preferred_exception_return, vect_offset);, exception, preferred_exception_return, vect_offset);
// AArch64.ReportTagCheckFault()
// =============================
// Records a tag check fault exception into the appropriate TCFR_ELx.// AArch64.RaiseTagCheckFault()
// ============================
// Raise a tag check fault exception.
AArch64.ReportTagCheckFault(bits(2) el, bit ttbr)
if el ==AArch64.RaiseTagCheckFault(bits(64) va, boolean write)
bits(2) target_el;
bits(64) preferred_exception_return = EL3ThisInstrAddr then
assert ttbr == '0';
TFSR_EL3.TF0 = '1';
elsif el ==();
integer vect_offset = 0x0;
if PSTATE.EL == EL2EL0 then
if ttbr == '0' then
TFSR_EL2.TF0 = '1';
else
TFSR_EL2.TF1 = '1';
elsif el == target_el = if HCR_EL2.TGE == '0' then EL1 then
if ttbr == '0' then
TFSR_EL1.TF0 = '1';
else
TFSR_EL1.TF1 = '1';
elsif el ==else ;
else
target_el = PSTATE.EL;
exception = ExceptionSyndrome(Exception_DataAbort);
exception.syndrome<5:0> = '010001';
if write then
exception.syndrome<6> = '1';
exception.vaddress = bits(4) UNKNOWN : va<59:0>;
AArch64.TakeExceptionEL0EL2 then
if ttbr == '0' then
TFSRE0_EL1.TF0 = '1';
else
TFSRE0_EL1.TF1 = '1';(target_el, exception, preferred_exception_return, vect_offset);
// AArch64.SPAlignmentFault()
// ==========================
// Called on an unaligned stack pointer in AArch64 state.// AArch64.ReportTagCheckFault()
// =============================
// Records a tag check fault exception into the appropriate TCFR_ELx.
AArch64.SPAlignmentFault()
bits(64) preferred_exception_return =AArch64.ReportTagCheckFault(bits(2) el, bit ttbr)
if el == ThisInstrAddrEL3();
vect_offset = 0x0;
exception =then
assert ttbr == '0';
TFSR_EL3.TF0 = '1';
elsif el == ExceptionSyndromeEL2(then
if ttbr == '0' then
TFSR_EL2.TF0 = '1';
else
TFSR_EL2.TF1 = '1';
elsif el ==Exception_SPAlignment);
if UInt(PSTATE.EL) > UInt(EL1) thenthen
if ttbr == '0' then
TFSR_EL1.TF0 = '1';
else
TFSR_EL1.TF1 = '1';
elsif el ==
AArch64.TakeExceptionEL0(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif EL2Enabled() && HCR_EL2.TGE == '1' then
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);then
if ttbr == '0' then
TFSRE0_EL1.TF0 = '1';
else
TFSRE0_EL1.TF1 = '1';
// AArch64.TagCheckFault()
// =======================
// Handle a tag check fault condition.// AArch64.SPAlignmentFault()
// ==========================
// Called on an unaligned stack pointer in AArch64 state.
AArch64.TagCheckFault(bits(64) vaddress,AArch64.SPAlignmentFault()
bits(64) preferred_exception_return = AccTypeThisInstrAddr acctype, boolean iswrite)
bits(2) tcf =();
vect_offset = 0x0;
exception = AArch64.EffectiveTCFExceptionSyndrome(PSTATE.EL);
case tcf of
when '00' // Tag Check Faults have no effect on the PE
return;
when '01' // Tag Check Faults cause a synchronous exception(
AArch64.RaiseTagCheckFaultException_SPAlignment(vaddress, iswrite);
when '10' // Tag Check Faults are asynchronously accumulated);
if
AArch64.ReportTagCheckFaultUInt(PSTATE.EL, vaddress<55>);
when '11' // Tag Check Faults cause a synchronous exception on reads or on
// a read-write access, and are asynchronously accumulated on writes
// Check for access performing both a read and a write.
readwrite = acctype IN {(PSTATE.EL) >AccType_ATOMICRWUInt,(
AccType_ORDEREDATOMICRWEL1,) then
AccType_ORDEREDRWAArch64.TakeException};
if !iswrite || readwrite then(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif
AArch64.RaiseTagCheckFaultEL2Enabled(vaddress, iswrite);
else() && HCR_EL2.TGE == '1' then
(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1AArch64.ReportTagCheckFaultAArch64.TakeException(PSTATE.EL, vaddress<55>);, exception, preferred_exception_return, vect_offset);
// BranchTargetException()
// AArch64.TagCheckFault()
// =======================
// Raise branch target exception.// Handle a tag check fault condition.
AArch64.BranchTargetException(bits(52) vaddress)
route_to_el2 = PSTATE.EL ==AArch64.TagCheckFault(bits(64) vaddress, EL0AccType &&acctype, boolean iswrite)
bits(2) tcf = EL2EnabledAArch64.EffectiveTCF() && HCR_EL2.TGE == '1';
bits(64) preferred_exception_return =(PSTATE.EL);
case tcf of
when '00' // Tag Check Faults have no effect on the PE
return;
when '01' // Tag Check Faults cause a synchronous exception ThisInstrAddrAArch64.RaiseTagCheckFault();
vect_offset = 0x0;
exception =(vaddress, iswrite);
when '10' // Tag Check Faults are asynchronously accumulated ExceptionSyndromeAArch64.ReportTagCheckFault((PSTATE.EL, vaddress<55>);
when '11' // Tag Check Faults cause a synchronous exception on reads or on
// a read-write access, and are asynchronously accumulated on writes
// Check for access performing both a read and a write.
readwrite = acctype IN {Exception_BranchTargetAccType_ATOMICRW);
exception.syndrome<1:0> = PSTATE.BTYPE;
exception.syndrome<24:2> =, ZerosAccType_ORDEREDATOMICRW(); // RES0
if, UIntAccType_ORDEREDRW(PSTATE.EL) >};
if !iswrite || readwrite then UIntAArch64.RaiseTagCheckFault((vaddress, iswrite);
elseEL1AArch64.ReportTagCheckFault) then
AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif route_to_el2 then
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);(PSTATE.EL, vaddress<55>);
// AArch64.TakePhysicalFIQException()
// ==================================// BranchTargetException()
// =======================
// Raise branch target exception.
AArch64.TakePhysicalFIQException()
AArch64.BranchTargetException(bits(52) vaddress)
route_to_el3 = route_to_el2 = PSTATE.EL == HaveEL(EL3) && SCR_EL3.FIQ == '1';
route_to_el2 = (PSTATE.EL IN {EL0,&& EL1} && EL2Enabled() &&
(HCR_EL2.TGE == '1' || HCR_EL2.FMO == '1'));
() && HCR_EL2.TGE == '1';
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x100;
vect_offset = 0x0;
exception = ExceptionSyndrome(Exception_FIQException_BranchTarget);
if route_to_el3 then exception.syndrome<1:0> = PSTATE.BTYPE;
exception.syndrome<24:2> =
AArch64.TakeExceptionZeros((); // RES0
ifEL3UInt, exception, preferred_exception_return, vect_offset);
elsif PSTATE.EL ==(PSTATE.EL) > EL2UInt || route_to_el2 then
assert PSTATE.EL !=( EL3EL1;) then
AArch64.TakeException((PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif route_to_el2 thenAArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
assert PSTATE.EL IN {EL0, EL1};, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.TakePhysicalIRQException()
// ==================================
// Take an enabled physical IRQ exception.// AArch64.TakePhysicalFIQException()
// ==================================
AArch64.TakePhysicalIRQException()
AArch64.TakePhysicalFIQException()
route_to_el3 = HaveEL(EL3) && SCR_EL3.IRQ == '1';
) && SCR_EL3.FIQ == '1';
route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
(HCR_EL2.TGE == '1' || HCR_EL2.IMO == '1'));
(HCR_EL2.TGE == '1' || HCR_EL2.FMO == '1'));
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x80;
vect_offset = 0x100;
exception = ExceptionSyndrome(Exception_IRQException_FIQ);
if route_to_el3 then
AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
elsif PSTATE.EL == EL2 || route_to_el2 then
assert PSTATE.EL != EL3;
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
assert PSTATE.EL IN {EL0, EL1};
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.TakePhysicalSErrorException()
// =====================================// AArch64.TakePhysicalIRQException()
// ==================================
// Take an enabled physical IRQ exception.
AArch64.TakePhysicalSErrorException(bits(25) syndrome)
AArch64.TakePhysicalIRQException()
route_to_el3 = HaveEL(EL3) && SCR_EL3.EA == '1';
) && SCR_EL3.IRQ == '1';
route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
(HCR_EL2.TGE == '1' || (! (HCR_EL2.TGE == '1' || HCR_EL2.IMO == '1'));
bits(64) preferred_exception_return =IsInHost() && HCR_EL2.AMO == '1')));
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x180;
vect_offset = 0x80;
bits(2) target_el;
if PSTATE.EL == exception = EL3ExceptionSyndrome || route_to_el3 then
target_el =( Exception_IRQ);
if route_to_el3 then
AArch64.TakeException(EL3;
, exception, preferred_exception_return, vect_offset);
elsif PSTATE.EL == EL2 || route_to_el2 then
target_el = assert PSTATE.EL != EL2EL3;
else
target_el =; EL1AArch64.TakeException;
if( IsSErrorEdgeTriggeredEL2(target_el, syndrome) then, exception, preferred_exception_return, vect_offset);
else
assert PSTATE.EL IN {
ClearPendingPhysicalSErrorEL0();
exception =, ExceptionSyndromeEL1(};Exception_SErrorAArch64.TakeException);
exception.syndrome = syndrome;(
AArch64.TakeExceptionEL1(target_el, exception, preferred_exception_return, vect_offset);, exception, preferred_exception_return, vect_offset);
// AArch64.TakeVirtualFIQException()
// =================================// AArch64.TakePhysicalSErrorException()
// =====================================
AArch64.TakeVirtualFIQException()
assert PSTATE.EL IN {AArch64.TakePhysicalSErrorException(boolean impdef_syndrome, bits(24) syndrome)
route_to_el3 =HaveEL(EL3) && SCR_EL3.EA == '1';
route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled();
assert HCR_EL2.TGE == '0' && HCR_EL2.FMO == '1'; // Virtual IRQ enabled if TGE==0 and FMO==1
bits(64) preferred_exception_return =() &&
(HCR_EL2.TGE == '1' || (! IsInHost() && HCR_EL2.AMO == '1')));
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x100;
vect_offset = 0x180;
exception = if IsSErrorEdgeTriggered(syndrome) then
ClearPendingPhysicalSError();
exception = ExceptionSyndrome();
exception.syndrome<24> = if impdef_syndrome then '1' else '0';
exception.syndrome<23:0> = syndrome;
if PSTATE.EL == EL3 || route_to_el3 then
AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
elsif PSTATE.EL == EL2 || route_to_el2 then
AArch64.TakeException(EL2Exception_FIQException_SError);, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.TakeVirtualIRQException()
// AArch64.TakeVirtualFIQException()
// =================================
AArch64.TakeVirtualIRQException()
AArch64.TakeVirtualFIQException()
assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
assert HCR_EL2.TGE == '0' && HCR_EL2.IMO == '1'; // Virtual IRQ enabled if TGE==0 and IMO==1
assert HCR_EL2.TGE == '0' && HCR_EL2.FMO == '1'; // Virtual IRQ enabled if TGE==0 and FMO==1
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x80;
vect_offset = 0x100;
exception = ExceptionSyndrome(Exception_IRQException_FIQ);
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.TakeVirtualSErrorException()
// ====================================// AArch64.TakeVirtualIRQException()
// =================================
AArch64.TakeVirtualSErrorException(bits(25) syndrome)
AArch64.TakeVirtualIRQException()
assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
assert HCR_EL2.TGE == '0' && HCR_EL2.AMO == '1'; // Virtual SError enabled if TGE==0 and AMO==1
assert HCR_EL2.TGE == '0' && HCR_EL2.IMO == '1'; // Virtual IRQ enabled if TGE==0 and IMO==1
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x180;
vect_offset = 0x80;
exception = ExceptionSyndrome(Exception_SErrorException_IRQ);
if HaveRASExt() then
exception.syndrome<24> = VSESR_EL2.IDS;
exception.syndrome<23:0> = VSESR_EL2.ISS;
else
impdef_syndrome = syndrome<24> == '1';
if impdef_syndrome then exception.syndrome = syndrome;
ClearPendingVirtualSError(););
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.BreakpointException()
// =============================// AArch64.TakeVirtualSErrorException()
// ====================================
AArch64.BreakpointException(AArch64.TakeVirtualSErrorException(boolean impdef_syndrome, bits(24) syndrome)
assert PSTATE.EL IN {FaultRecord fault)
assert PSTATE.EL != EL3;
route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
(HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));
();
assert HCR_EL2.TGE == '0' && HCR_EL2.AMO == '1'; // Virtual SError enabled if TGE==0 and AMO==1
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x0;
vect_offset = 0x180;
vaddress = bits(64) UNKNOWN;
exception = AArch64.AbortSyndromeExceptionSyndrome(Exception_BreakpointException_SError, fault, vaddress);
if PSTATE.EL ==);
if EL2HaveRASExt || route_to_el2 then() then
exception.syndrome<24> = VSESR_EL2.IDS;
exception.syndrome<23:0> = VSESR_EL2.ISS;
else
exception.syndrome<24> = if impdef_syndrome then '1' else '0';
if impdef_syndrome then exception.syndrome<23:0> = syndrome;
AArch64.TakeExceptionClearPendingVirtualSError(EL2, exception, preferred_exception_return, vect_offset);
else();
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.SoftwareBreakpoint()
// ============================// AArch64.BreakpointException()
// =============================
AArch64.SoftwareBreakpoint(bits(16) immediate)
route_to_el2 = (PSTATE.EL IN {AArch64.BreakpointException(FaultRecord fault)
assert PSTATE.EL != EL3;
route_to_el2 = (PSTATE.EL IN {EL0, EL1} &&
EL2Enabled() && (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x0;
vaddress = bits(64) UNKNOWN;
exception = ExceptionSyndromeAArch64.AbortSyndrome(Exception_SoftwareBreakpointException_Breakpoint);
exception.syndrome<15:0> = immediate;
, fault, vaddress);
if if PSTATE.EL == UIntEL2(PSTATE.EL) > UInt(EL1) then
AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif route_to_el2 then|| route_to_el2 then
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.SoftwareStepException()
// ===============================// AArch64.SoftwareBreakpoint()
// ============================
AArch64.SoftwareStepException()
assert PSTATE.EL !=AArch64.SoftwareBreakpoint(bits(16) immediate)
route_to_el2 = (PSTATE.EL IN { EL3;
route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
(HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x0;
exception = ExceptionSyndrome(Exception_SoftwareStepException_SoftwareBreakpoint);
exception.syndrome<15:0> = immediate;
if SoftwareStep_DidNotStepUInt() then
exception.syndrome<24> = '0';
else
exception.syndrome<24> = '1';
exception.syndrome<6> = if(PSTATE.EL) > SoftwareStep_SteppedEXUInt() then '1' else '0';
exception.syndrome<5:0> = '100010'; // IFSC = Debug Exception
if PSTATE.EL ==( ) then
AArch64.TakeExceptionEL2EL1 || route_to_el2 then(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif route_to_el2 then
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.VectorCatchException()
// ==============================
// Vector Catch taken from EL0 or EL1 to EL2. This can only be called when debug exceptions are
// being routed to EL2, as Vector Catch is a legacy debug event.// AArch64.SoftwareStepException()
// ===============================
AArch64.VectorCatchException(AArch64.SoftwareStepException()
assert PSTATE.EL !=FaultRecordEL3 fault)
assert PSTATE.EL !=;
route_to_el2 = (PSTATE.EL IN { EL2EL0;
assert, EL1} && EL2Enabled() && (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1');
() &&
(HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x0;
vaddress = bits(64) UNKNOWN;
exception = AArch64.AbortSyndromeExceptionSyndrome(Exception_VectorCatchException_SoftwareStep, fault, vaddress););
if
SoftwareStep_DidNotStep() then
exception.syndrome<24> = '0';
else
exception.syndrome<24> = '1';
exception.syndrome<6> = if SoftwareStep_SteppedEX() then '1' else '0';
exception.syndrome<5:0> = '100010'; // IFSC = Debug Exception
if PSTATE.EL == EL2 || route_to_el2 then
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.WatchpointException()
// =============================// AArch64.VectorCatchException()
// ==============================
// Vector Catch taken from EL0 or EL1 to EL2. This can only be called when debug exceptions are
// being routed to EL2, as Vector Catch is a legacy debug event.
AArch64.WatchpointException(bits(64) vaddress,AArch64.VectorCatchException( FaultRecord fault)
assert PSTATE.EL != EL3EL2;
route_to_el2 = (PSTATE.EL IN { assertEL0, EL1} && EL2Enabled() &&
(HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));
() && (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1');
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x0;
if vaddress = bits(64) UNKNOWN;
exception = HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER then
exception = AArch64.AbortSyndrome(Exception_NV2WatchpointException_VectorCatch, fault, vaddress);
else
exception =, fault, vaddress); AArch64.AbortSyndrome(Exception_Watchpoint, fault, vaddress);
if PSTATE.EL == EL2 || route_to_el2 then
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.ExceptionClass()
// ========================
// Returns the Exception Class and Instruction Length fields to be reported in ESR
(integer,bit)// AArch64.WatchpointException()
// ============================= AArch64.ExceptionClass(AArch64.WatchpointException(bits(64) vaddress,ExceptionFaultRecord exceptype, bits(2) target_el)
il_is_valid = TRUE;
from_32 =fault)
assert PSTATE.EL != UsingAArch32EL3();
;
case exceptype of
when route_to_el2 = (PSTATE.EL IN { Exception_UncategorizedEL0 ec = 0x00; il_is_valid = FALSE;
when, Exception_WFxTrapEL1 ec = 0x01;
when} && Exception_CP15RTTrapEL2Enabled ec = 0x03; assert from_32;
when() &&
(HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));
bits(64) preferred_exception_return = Exception_CP15RRTTrapThisInstrAddr ec = 0x04; assert from_32;
when();
vect_offset = 0x0;
if Exception_CP14RTTrapHaveNV2Ext ec = 0x05; assert from_32;
when() && fault.acctype == Exception_CP14DTTrapAccType_NV2REGISTER ec = 0x06; assert from_32;
whenthen
exception = Exception_AdvSIMDFPAccessTrapAArch64.AbortSyndrome ec = 0x07;
when( Exception_FPIDTrapException_NV2Watchpoint ec = 0x08;
when, fault, vaddress);
else
exception = Exception_PACTrapAArch64.AbortSyndrome ec = 0x09;
when( Exception_LDST64BTrap ec = 0x0A;
when Exception_CP14RRTTrap ec = 0x0C; assert from_32;
when Exception_BranchTarget ec = 0x0D;
when Exception_IllegalState ec = 0x0E; il_is_valid = FALSE;
when Exception_SupervisorCall ec = 0x11;
when Exception_HypervisorCall ec = 0x12;
when Exception_MonitorCall ec = 0x13;
when Exception_SystemRegisterTrap ec = 0x18; assert !from_32;
when Exception_SVEAccessTrap ec = 0x19; assert !from_32;
when Exception_ERetTrap ec = 0x1A; assert !from_32;
when Exception_PACFail ec = 0x1C; assert !from_32;
when Exception_InstructionAbort ec = 0x20; il_is_valid = FALSE;
when Exception_PCAlignment ec = 0x22; il_is_valid = FALSE;
when Exception_DataAbort ec = 0x24;
when Exception_NV2DataAbort ec = 0x25;
when Exception_SPAlignment ec = 0x26; il_is_valid = FALSE; assert !from_32;
when Exception_FPTrappedException ec = 0x28;
when Exception_SError ec = 0x2F; il_is_valid = FALSE;
when Exception_Breakpoint ec = 0x30; il_is_valid = FALSE;
when Exception_SoftwareStep ec = 0x32; il_is_valid = FALSE;
when Exception_Watchpoint ec = 0x34; il_is_valid = FALSE;
when, fault, vaddress);
if PSTATE.EL == Exception_NV2WatchpointEL2 ec = 0x35; il_is_valid = FALSE;
when|| route_to_el2 then Exception_SoftwareBreakpointAArch64.TakeException ec = 0x38;
when( Exception_VectorCatchEL2 ec = 0x3A; il_is_valid = FALSE; assert from_32;
otherwise, exception, preferred_exception_return, vect_offset);
else UnreachableAArch64.TakeException();
if ec IN {0x20,0x24,0x30,0x32,0x34} && target_el == PSTATE.EL then
ec = ec + 1;
if ec IN {0x11,0x12,0x13,0x28,0x38} && !from_32 then
ec = ec + 4;
if il_is_valid then
il = if( ThisInstrLengthEL1() == 32 then '1' else '0';
else
il = '1';
assert from_32 || il == '1'; // AArch64 instructions always 32-bit
return (ec,il);, exception, preferred_exception_return, vect_offset);
// AArch64.ReportException()
// =========================
// Report syndrome information for exception taken to AArch64 state.// AArch64.ExceptionClass()
// ========================
// Returns the Exception Class and Instruction Length fields to be reported in ESR
(integer,bit)
AArch64.ReportException(AArch64.ExceptionClass(ExceptionRecord exception, bits(2) target_el)
Exception exceptype = exception.exceptype;
exceptype, bits(2) target_el)
(ec,il) = il_is_valid = TRUE;
from_32 = AArch64.ExceptionClassUsingAArch32(exceptype, target_el);
iss = exception.syndrome;
iss2 = exception.syndrome2;
();
// IL is not valid for Data Abort exceptions without valid instruction syndrome information
if ec IN {0x24,0x25} && iss<24> == '0' then
il = '1'; case exceptype of
when
ESRException_Uncategorized[target_el] = (ec = 0x00; il_is_valid = FALSE;
whenZerosException_WFxTrap(27) : // <63:37>
iss2 : // <36:32>
ec<5:0> : // <31:26>
il : // <25>
iss); // <24:0>
if exceptype IN {ec = 0x01;
whenException_CP15RTTrap ec = 0x03; assert from_32;
when Exception_CP15RRTTrap ec = 0x04; assert from_32;
when Exception_CP14RTTrap ec = 0x05; assert from_32;
when Exception_CP14DTTrap ec = 0x06; assert from_32;
when Exception_AdvSIMDFPAccessTrap ec = 0x07;
when Exception_FPIDTrap ec = 0x08;
when Exception_PACTrap ec = 0x09;
when Exception_LDST64BTrap ec = 0x0A;
when Exception_CP14RRTTrap ec = 0x0C; assert from_32;
when Exception_BranchTarget ec = 0x0D;
when Exception_IllegalState ec = 0x0E; il_is_valid = FALSE;
when Exception_SupervisorCall ec = 0x11;
when Exception_HypervisorCall ec = 0x12;
when Exception_MonitorCall ec = 0x13;
when Exception_SystemRegisterTrap ec = 0x18; assert !from_32;
when Exception_SVEAccessTrap ec = 0x19; assert !from_32;
when Exception_ERetTrap ec = 0x1A; assert !from_32;
when Exception_PACFail ec = 0x1C; assert !from_32;
when Exception_InstructionAbort,ec = 0x20; il_is_valid = FALSE;
when Exception_PCAlignment,ec = 0x22; il_is_valid = FALSE;
when Exception_DataAbort,ec = 0x24;
when
Exception_NV2DataAbort,ec = 0x25;
when Exception_NV2WatchpointException_SPAlignment,ec = 0x26; il_is_valid = FALSE; assert !from_32;
when
Exception_FPTrappedException ec = 0x28;
when Exception_SError ec = 0x2F; il_is_valid = FALSE;
when Exception_Breakpoint ec = 0x30; il_is_valid = FALSE;
when Exception_SoftwareStep ec = 0x32; il_is_valid = FALSE;
when Exception_Watchpoint} thenec = 0x34; il_is_valid = FALSE;
when
FARException_NV2Watchpoint[target_el] = exception.vaddress;
elseec = 0x35; il_is_valid = FALSE;
when
FARException_SoftwareBreakpoint[target_el] = bits(64) UNKNOWN;
if target_el ==ec = 0x38;
when EL2Exception_VectorCatch then
if exception.ipavalid then
HPFAR_EL2<43:4> = exception.ipaddress<51:12>;
ifec = 0x3A; il_is_valid = FALSE; assert from_32;
otherwise IsSecureEL2EnabledUnreachable() &&();
if ec IN {0x20,0x24,0x30,0x32,0x34} && target_el == PSTATE.EL then
ec = ec + 1;
if ec IN {0x11,0x12,0x13,0x28,0x38} && !from_32 then
ec = ec + 4;
if il_is_valid then
il = if IsSecureThisInstrLength() then
HPFAR_EL2.NS = exception.NS;
else
HPFAR_EL2.NS = '0';
else
HPFAR_EL2<43:4> = bits(40) UNKNOWN;
() == 32 then '1' else '0';
else
il = '1';
assert from_32 || il == '1'; // AArch64 instructions always 32-bit
return; return (ec,il);
// Resets System registers and memory-mapped control registers that have architecturally-defined
// reset values to those values.// AArch64.ReportException()
// =========================
// Report syndrome information for exception taken to AArch64 state.
AArch64.ResetControlRegisters(boolean cold_reset);AArch64.ReportException(ExceptionRecord exception, bits(2) target_el)
Exception exceptype = exception.exceptype;
(ec,il) = AArch64.ExceptionClass(exceptype, target_el);
iss = exception.syndrome;
iss2 = exception.syndrome2;
// IL is not valid for Data Abort exceptions without valid instruction syndrome information
if ec IN {0x24,0x25} && iss<24> == '0' then
il = '1';
ESR[target_el] = (Zeros(27) : // <63:37>
iss2 : // <36:32>
ec<5:0> : // <31:26>
il : // <25>
iss); // <24:0>
if exceptype IN {Exception_InstructionAbort, Exception_PCAlignment, Exception_DataAbort,
Exception_NV2DataAbort, Exception_NV2Watchpoint,
Exception_Watchpoint} then
FAR[target_el] = exception.vaddress;
else
FAR[target_el] = bits(64) UNKNOWN;
if target_el == EL2 then
if exception.ipavalid then
HPFAR_EL2<43:4> = exception.ipaddress<51:12>;
if IsSecureEL2Enabled() && IsSecure() then
HPFAR_EL2.NS = exception.NS;
else
HPFAR_EL2.NS = '0';
else
HPFAR_EL2<43:4> = bits(40) UNKNOWN;
return;
// AArch64.TakeReset()
// ===================
// Reset into AArch64 state// Resets System registers and memory-mapped control registers that have architecturally-defined
// reset values to those values.
AArch64.TakeReset(boolean cold_reset)
assert !AArch64.ResetControlRegisters(boolean cold_reset);HighestELUsingAArch32();
// Enter the highest implemented Exception level in AArch64 state
PSTATE.nRW = '0';
if HaveEL(EL3) then
PSTATE.EL = EL3;
elsif HaveEL(EL2) then
PSTATE.EL = EL2;
else
PSTATE.EL = EL1;
// Reset the system registers and other system components
AArch64.ResetControlRegisters(cold_reset);
// Reset all other PSTATE fields
PSTATE.SP = '1'; // Select stack pointer
PSTATE.<D,A,I,F> = '1111'; // All asynchronous exceptions masked
PSTATE.SS = '0'; // Clear software step bit
PSTATE.DIT = '0'; // PSTATE.DIT is reset to 0 when resetting into AArch64
PSTATE.IL = '0'; // Clear Illegal Execution state bit
// All registers, bits and fields not reset by the above pseudocode or by the BranchTo() call
// below are UNKNOWN bitstrings after reset. In particular, the return information registers
// ELR_ELx and SPSR_ELx have UNKNOWN values, so that it
// is impossible to return from a reset in an architecturally defined way.
AArch64.ResetGeneralRegisters();
AArch64.ResetSIMDFPRegisters();
AArch64.ResetSpecialRegisters();
ResetExternalDebugRegisters(cold_reset);
bits(64) rv; // IMPLEMENTATION DEFINED reset vector
if HaveEL(EL3) then
rv = RVBAR_EL3;
elsif HaveEL(EL2) then
rv = RVBAR_EL2;
else
rv = RVBAR_EL1;
// The reset vector must be correctly aligned
assert IsZero(rv<63:PAMax()>) && IsZero(rv<1:0>);
BranchTo(rv, BranchType_RESET);
// AArch64.FPTrappedException()
// ============================// AArch64.TakeReset()
// ===================
// Reset into AArch64 state
AArch64.FPTrappedException(boolean is_ase, bits(8) accumulated_exceptions)
exception =AArch64.TakeReset(boolean cold_reset)
assert ! ExceptionSyndromeHighestELUsingAArch32(();
// Enter the highest implemented Exception level in AArch64 state
PSTATE.nRW = '0';
ifException_FPTrappedExceptionHaveEL);
if is_ase then
if boolean IMPLEMENTATION_DEFINED "vector instructions set TFV to 1" then
exception.syndrome<23> = '1'; // TFV
else
exception.syndrome<23> = '0'; // TFV
else
exception.syndrome<23> = '1'; // TFV
exception.syndrome<10:8> = bits(3) UNKNOWN; // VECITR
if exception.syndrome<23> == '1' then
exception.syndrome<7,4:0> = accumulated_exceptions<7,4:0>; // IDF,IXF,UFF,OFF,DZF,IOF
else
exception.syndrome<7,4:0> = bits(6) UNKNOWN;
route_to_el2 =( EL2EnabledEL3() && HCR_EL2.TGE == '1';
bits(64) preferred_exception_return =) then
PSTATE.EL = ThisInstrAddrEL3();
vect_offset = 0x0;
if;
elsif UIntHaveEL(PSTATE.EL) >( UIntEL2() then
PSTATE.EL =EL2;
else
PSTATE.EL = EL1) then;
// Reset the system registers and other system components
AArch64.TakeExceptionAArch64.ResetControlRegisters(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif route_to_el2 then(cold_reset);
// Reset all other PSTATE fields
PSTATE.SP = '1'; // Select stack pointer
PSTATE.<D,A,I,F> = '1111'; // All asynchronous exceptions masked
PSTATE.SS = '0'; // Clear software step bit
PSTATE.DIT = '0'; // PSTATE.DIT is reset to 0 when resetting into AArch64
PSTATE.IL = '0'; // Clear Illegal Execution state bit
// All registers, bits and fields not reset by the above pseudocode or by the BranchTo() call
// below are UNKNOWN bitstrings after reset. In particular, the return information registers
// ELR_ELx and SPSR_ELx have UNKNOWN values, so that it
// is impossible to return from a reset in an architecturally defined way.
AArch64.TakeExceptionAArch64.ResetGeneralRegisters(();AArch64.ResetSIMDFPRegisters();
AArch64.ResetSpecialRegisters();
ResetExternalDebugRegisters(cold_reset);
bits(64) rv; // IMPLEMENTATION DEFINED reset vector
if HaveEL(EL3) then
rv = RVBAR_EL3;
elsif HaveEL(EL2, exception, preferred_exception_return, vect_offset);
else) then
rv = RVBAR_EL2;
else
rv = RVBAR_EL1;
// The reset vector must be correctly aligned
assert
AArch64.TakeExceptionIsZero((rv<63:()>) && IsZero(rv<1:0>);
BranchTo(rv, BranchType_RESETEL1PAMax, exception, preferred_exception_return, vect_offset););
// AArch64.CallHypervisor()
// ========================
// Performs a HVC call// AArch64.FPTrappedException()
// ============================
AArch64.CallHypervisor(bits(16) immediate)
assertAArch64.FPTrappedException(boolean is_ase, bits(8) accumulated_exceptions)
exception = HaveELExceptionSyndrome(EL2Exception_FPTrappedException);
if is_ase then
if boolean IMPLEMENTATION_DEFINED "vector instructions set TFV to 1" then
exception.syndrome<23> = '1'; // TFV
else
exception.syndrome<23> = '0'; // TFV
else
exception.syndrome<23> = '1'; // TFV
exception.syndrome<10:8> = bits(3) UNKNOWN; // VECITR
if exception.syndrome<23> == '1' then
exception.syndrome<7,4:0> = accumulated_exceptions<7,4:0>; // IDF,IXF,UFF,OFF,DZF,IOF
else
exception.syndrome<7,4:0> = bits(6) UNKNOWN;
if route_to_el2 = UsingAArch32EL2Enabled() then() && HCR_EL2.TGE == '1';
bits(64) preferred_exception_return = AArch32.ITAdvanceThisInstrAddr();();
vect_offset = 0x0;
if
SSAdvanceUInt();
bits(64) preferred_exception_return =(PSTATE.EL) > NextInstrAddrUInt();
vect_offset = 0x0;
exception =( ExceptionSyndromeEL1() thenException_HypervisorCallAArch64.TakeException);
exception.syndrome<15:0> = immediate;
if PSTATE.EL ==(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif route_to_el2 then EL3 then
AArch64.TakeException(EL3EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL2EL1, exception, preferred_exception_return, vect_offset);
// AArch64.CallSecureMonitor()
// ===========================// AArch64.CallHypervisor()
// ========================
// Performs a HVC call
AArch64.CallSecureMonitor(bits(16) immediate)
AArch64.CallHypervisor(bits(16) immediate)
assert HaveEL(EL3EL2) && !);
ifELUsingAArch32(EL3);
if UsingAArch32() then AArch32.ITAdvance();
SSAdvance();
bits(64) preferred_exception_return = NextInstrAddr();
vect_offset = 0x0;
exception = ExceptionSyndrome(Exception_MonitorCallException_HypervisorCall);
exception.syndrome<15:0> = immediate; exception.syndrome<15:0> = immediate;
if PSTATE.EL ==
EL3 then
AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
// AArch64.CallSupervisor()
// ========================
// Calls the Supervisor// AArch64.CallSecureMonitor()
// ===========================
AArch64.CallSupervisor(bits(16) immediate)
ifAArch64.CallSecureMonitor(bits(16) immediate)
assert HaveEL(EL3) && !ELUsingAArch32(EL3);
if UsingAArch32() then AArch32.ITAdvance();
SSAdvance();
route_to_el2 = PSTATE.EL == bits(64) preferred_exception_return = EL0 && EL2Enabled() && HCR_EL2.TGE == '1';
bits(64) preferred_exception_return = NextInstrAddr();
vect_offset = 0x0;
exception = ExceptionSyndrome(Exception_SupervisorCallException_MonitorCall);
exception.syndrome<15:0> = immediate;
if exception.syndrome<15:0> = immediate; UInt(PSTATE.EL) > UInt(EL1) then
AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif route_to_el2 then(
AArch64.TakeExceptionEL3(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.TakeException()
// =======================
// Take an exception to an Exception Level using AArch64.// AArch64.CallSupervisor()
// ========================
// Calls the Supervisor
AArch64.TakeException(bits(2) target_el,AArch64.CallSupervisor(bits(16) immediate)
if ExceptionRecordUsingAArch32 exception,
bits(64) preferred_exception_return, integer vect_offset)
assert() then HaveELAArch32.ITAdvance(target_el) && !();ELUsingAArch32SSAdvance(target_el) &&();
route_to_el2 = PSTATE.EL == UIntEL0(target_el) >=&& UIntEL2Enabled(PSTATE.EL);
() && HCR_EL2.TGE == '1';
sync_errors = bits(64) preferred_exception_return = HaveIESBNextInstrAddr() &&();
vect_offset = 0x0;
exception = SCTLRExceptionSyndrome[target_el].IESB == '1';
if( HaveDoubleFaultExtException_SupervisorCall() then
sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && target_el ==);
exception.syndrome<15:0> = immediate;
if EL3);
if sync_errors && InsertIESBBeforeException(target_el) then
SynchronizeErrors();
iesb_req = FALSE;
sync_errors = FALSE;
TakeUnmaskedPhysicalSErrorInterrupts(iesb_req);
SynchronizeContext();
// If coming from AArch32 state, the top parts of the X[] registers might be set to zero
from_32 = UsingAArch32();
if from_32 then AArch64.MaybeZeroRegisterUppers();
MaybeZeroSVEUppers(target_el);
if UInt(target_el) >(PSTATE.EL) > UInt(PSTATE.EL) then
boolean lower_32;
if target_el ==( EL3 then
if EL2Enabled() then
lower_32 = ELUsingAArch32(EL2);
else
lower_32 = ELUsingAArch32(EL1);
elsif) then IsInHostAArch64.TakeException() && PSTATE.EL ==(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif route_to_el2 then EL0AArch64.TakeException && target_el ==( EL2 then
lower_32 =, exception, preferred_exception_return, vect_offset);
else ELUsingAArch32AArch64.TakeException(EL0);
else
lower_32 = ELUsingAArch32(target_el - 1);
vect_offset = vect_offset + (if lower_32 then 0x600 else 0x400);
elsif PSTATE.SP == '1' then
vect_offset = vect_offset + 0x200;
bits(64) spsr = GetPSRFromPSTATE(AArch64_NonDebugState);
if PSTATE.EL == EL1 && target_el == EL1 && EL2Enabled() then
if HaveNV2Ext() && (HCR_EL2.<NV,NV1,NV2> == '100' || HCR_EL2.<NV,NV1,NV2> == '111') then
spsr<3:2> = '10';
else
if HaveNVExt() && HCR_EL2.<NV,NV1> == '10' then
spsr<3:2> = '10';
if HaveBTIExt() && !UsingAArch32() then
// SPSR[].BTYPE is only guaranteed valid for these exception types
if exception.exceptype IN {Exception_SError, Exception_IRQ, Exception_FIQ,
Exception_SoftwareStep, Exception_PCAlignment,
Exception_InstructionAbort, Exception_Breakpoint,
Exception_VectorCatch, Exception_SoftwareBreakpoint,
Exception_IllegalState, Exception_BranchTarget} then
zero_btype = FALSE;
else
zero_btype = ConstrainUnpredictableBool(Unpredictable_ZEROBTYPE);
if zero_btype then spsr<11:10> = '00';
if HaveNV2Ext() && exception.exceptype == Exception_NV2DataAbort && target_el == EL3 then
// external aborts are configured to be taken to EL3
exception.exceptype = Exception_DataAbort;
if !(exception.exceptype IN {Exception_IRQ, Exception_FIQ}) then
AArch64.ReportException(exception, target_el);
PSTATE.EL = target_el;
PSTATE.nRW = '0';
PSTATE.SP = '1';
SPSR[] = spsr;
ELR[] = preferred_exception_return;
PSTATE.SS = '0';
PSTATE.<D,A,I,F> = '1111';
PSTATE.IL = '0';
if from_32 then // Coming from AArch32
PSTATE.IT = '00000000';
PSTATE.T = '0'; // PSTATE.J is RES0
if (HavePANExt() && (PSTATE.EL == EL1 || (PSTATE.EL == EL2 && ELIsInHost(EL0))) &&
SCTLR[].SPAN == '0') then
PSTATE.PAN = '1';
if HaveUAOExt() then PSTATE.UAO = '0';
if HaveBTIExt() then PSTATE.BTYPE = '00';
if HaveSSBSExt() then PSTATE.SSBS = SCTLR[].DSSBS;
if HaveMTEExt() then PSTATE.TCO = '1';
BranchTo(VBAR[]<63:11>:vect_offset<10:0>, BranchType_EXCEPTION);
if sync_errors then
SynchronizeErrors();
iesb_req = TRUE;
TakeUnmaskedPhysicalSErrorInterrupts(iesb_req);
EndOfInstruction();, exception, preferred_exception_return, vect_offset);
// AArch64.AArch32SystemAccessTrap()
// =================================
// Trapped AARCH32 system register access.// AArch64.TakeException()
// =======================
// Take an exception to an Exception Level using AArch64.
AArch64.AArch32SystemAccessTrap(bits(2) target_el, integer ec)
assertAArch64.TakeException(bits(2) target_el, ExceptionRecord exception,
bits(64) preferred_exception_return, integer vect_offset)
assert HaveEL(target_el) && target_el !=(target_el) && ! EL0ELUsingAArch32 &&(target_el) && UInt(target_el) >= UInt(PSTATE.EL);
bits(64) preferred_exception_return = sync_errors = ThisInstrAddrHaveIESB();
vect_offset = 0x0;
exception =() && AArch64.AArch32SystemAccessTrapSyndromeSCTLR([target_el].IESB == '1';
ifThisInstrHaveDoubleFaultExt(), ec);() then
sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && target_el ==
);
if sync_errors && InsertIESBBeforeException(target_el) then
SynchronizeErrors();
iesb_req = FALSE;
sync_errors = FALSE;
TakeUnmaskedPhysicalSErrorInterrupts(iesb_req);
SynchronizeContext();
// If coming from AArch32 state, the top parts of the X[] registers might be set to zero
from_32 = UsingAArch32();
if from_32 then AArch64.MaybeZeroRegisterUppers();
MaybeZeroSVEUppers(target_el);
if UInt(target_el) > UInt(PSTATE.EL) then
boolean lower_32;
if target_el == EL3 then
if EL2Enabled() then
lower_32 = ELUsingAArch32(EL2);
else
lower_32 = ELUsingAArch32(EL1);
elsif IsInHost() && PSTATE.EL == EL0 && target_el == EL2 then
lower_32 = ELUsingAArch32(EL0);
else
lower_32 = ELUsingAArch32(target_el - 1);
vect_offset = vect_offset + (if lower_32 then 0x600 else 0x400);
elsif PSTATE.SP == '1' then
vect_offset = vect_offset + 0x200;
bits(64) spsr = GetPSRFromPSTATE(AArch64_NonDebugState);
if PSTATE.EL == EL1 && target_el == EL1 && EL2Enabled() then
if HaveNV2Ext() && (HCR_EL2.<NV,NV1,NV2> == '100' || HCR_EL2.<NV,NV1,NV2> == '111') then
spsr<3:2> = '10';
else
if HaveNVExt() && HCR_EL2.<NV,NV1> == '10' then
spsr<3:2> = '10';
if HaveBTIExt() && !UsingAArch32() then
// SPSR[].BTYPE is only guaranteed valid for these exception types
if exception.exceptype IN {Exception_SError, Exception_IRQ, Exception_FIQ,
Exception_SoftwareStep, Exception_PCAlignment,
Exception_InstructionAbort, Exception_Breakpoint,
Exception_VectorCatch, Exception_SoftwareBreakpoint,
Exception_IllegalState, Exception_BranchTarget} then
zero_btype = FALSE;
else
zero_btype = ConstrainUnpredictableBool(Unpredictable_ZEROBTYPE);
if zero_btype then spsr<11:10> = '00';
if HaveNV2Ext() && exception.exceptype == Exception_NV2DataAbort && target_el == EL3 then
// external aborts are configured to be taken to EL3
exception.exceptype = Exception_DataAbort;
if !(exception.exceptype IN {Exception_IRQ, Exception_FIQ}) then
AArch64.ReportException(exception, target_el);
PSTATE.EL = target_el;
PSTATE.nRW = '0';
PSTATE.SP = '1';
SPSR[] = spsr;
ELR[] = preferred_exception_return;
PSTATE.SS = '0';
ShouldAdvanceSS = FALSE;
PSTATE.<D,A,I,F> = '1111';
PSTATE.IL = '0';
if from_32 then // Coming from AArch32
PSTATE.IT = '00000000';
PSTATE.T = '0'; // PSTATE.J is RES0
if (HavePANExt() && (PSTATE.EL == EL1 || (PSTATE.EL == EL2 && ELIsInHost(EL0))) &&
SCTLR[].SPAN == '0') then
PSTATE.PAN = '1';
if HaveUAOExt() then PSTATE.UAO = '0';
if HaveBTIExt() then PSTATE.BTYPE = '00';
if HaveSSBSExt() then PSTATE.SSBS = SCTLR[].DSSBS;
if HaveMTEExt() then PSTATE.TCO = '1';
BranchTo(VBAR[]<63:11>:vect_offset<10:0>, BranchType_EXCEPTION);
if sync_errors then
SynchronizeErrors();
iesb_req = TRUE;
TakeUnmaskedPhysicalSErrorInterrupts(iesb_req);
EndOfInstructionAArch64.TakeExceptionEL3(target_el, exception, preferred_exception_return, vect_offset);();
// AArch64.AArch32SystemAccessTrapSyndrome()
// =========================================
// Returns the syndrome information for traps on AArch32 MCR, MCRR, MRC, MRRC, and VMRS, VMSR instructions,
// other than traps that are due to HCPTR or CPACR.
ExceptionRecord// AArch64.AArch32SystemAccessTrap()
// =================================
// Trapped AARCH32 system register access. AArch64.AArch32SystemAccessTrapSyndrome(bits(32) instr, integer ec)AArch64.AArch32SystemAccessTrap(bits(2) target_el, integer ec)
assert
ExceptionRecordHaveEL exception;
case ec of
when 0x0 exception =(target_el) && target_el != ExceptionSyndromeEL0(&&Exception_UncategorizedUInt);
when 0x3 exception =(target_el) >= ExceptionSyndromeUInt((PSTATE.EL);
bits(64) preferred_exception_return =Exception_CP15RTTrapThisInstrAddr);
when 0x4 exception =();
vect_offset = 0x0;
exception = ExceptionSyndromeAArch64.AArch32SystemAccessTrapSyndrome(Exception_CP15RRTTrapThisInstr);
when 0x5 exception =(), ec); ExceptionSyndromeAArch64.TakeException(Exception_CP14RTTrap);
when 0x6 exception = ExceptionSyndrome(Exception_CP14DTTrap);
when 0x7 exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
when 0x8 exception = ExceptionSyndrome(Exception_FPIDTrap);
when 0xC exception = ExceptionSyndrome(Exception_CP14RRTTrap);
otherwise Unreachable();
bits(20) iss = Zeros();
if exception.exceptype IN {Exception_FPIDTrap, Exception_CP14RTTrap, Exception_CP15RTTrap} then
// Trapped MRC/MCR, VMRS on FPSID
if exception.exceptype != Exception_FPIDTrap then // When trap is not for VMRS
iss<19:17> = instr<7:5>; // opc2
iss<16:14> = instr<23:21>; // opc1
iss<13:10> = instr<19:16>; // CRn
iss<4:1> = instr<3:0>; // CRm
else
iss<19:17> = '000';
iss<16:14> = '111';
iss<13:10> = instr<19:16>; // reg
iss<4:1> = '0000';
if instr<20> == '1' && instr<15:12> == '1111' then // MRC, Rt==15
iss<9:5> = '11111';
elsif instr<20> == '0' && instr<15:12> == '1111' then // MCR, Rt==15
iss<9:5> = bits(5) UNKNOWN;
else
iss<9:5> = LookUpRIndex(UInt(instr<15:12>), PSTATE.M)<4:0>;
elsif exception.exceptype IN {Exception_CP14RRTTrap, Exception_AdvSIMDFPAccessTrap, Exception_CP15RRTTrap} then
// Trapped MRRC/MCRR, VMRS/VMSR
iss<19:16> = instr<7:4>; // opc1
if instr<19:16> == '1111' then // Rt2==15
iss<14:10> = bits(5) UNKNOWN;
else
iss<14:10> = LookUpRIndex(UInt(instr<19:16>), PSTATE.M)<4:0>;
if instr<15:12> == '1111' then // Rt==15
iss<9:5> = bits(5) UNKNOWN;
else
iss<9:5> = LookUpRIndex(UInt(instr<15:12>), PSTATE.M)<4:0>;
iss<4:1> = instr<3:0>; // CRm
elsif exception.exceptype == Exception_CP14DTTrap then
// Trapped LDC/STC
iss<19:12> = instr<7:0>; // imm8
iss<4> = instr<23>; // U
iss<2:1> = instr<24,21>; // P,W
if instr<19:16> == '1111' then // Rn==15, LDC(Literal addressing)/STC
iss<9:5> = bits(5) UNKNOWN;
iss<3> = '1';
elsif exception.exceptype == Exception_Uncategorized then
// Trapped for unknown reason
iss<9:5> = LookUpRIndex(UInt(instr<19:16>), PSTATE.M)<4:0>; // Rn
iss<3> = '0';
iss<0> = instr<20>; // Direction
exception.syndrome<24:20> = ConditionSyndrome();
exception.syndrome<19:0> = iss;
return exception;(target_el, exception, preferred_exception_return, vect_offset);
// AArch64.AdvSIMDFPAccessTrap()
// =============================
// Trapped access to Advanced SIMD or FP registers due to CPACR[].// AArch64.AArch32SystemAccessTrapSyndrome()
// =========================================
// Returns the syndrome information for traps on AArch32 MCR, MCRR, MRC, MRRC, and VMRS, VMSR instructions,
// other than traps that are due to HCPTR or CPACR.
ExceptionRecord
AArch64.AdvSIMDFPAccessTrap(bits(2) target_el)
bits(64) preferred_exception_return =AArch64.AArch32SystemAccessTrapSyndrome(bits(32) instr, integer ec) ThisInstrAddrExceptionRecord();
vect_offset = 0x0;
exception;
route_to_el2 = (target_el == case ec of
when 0x0 exception = EL1ExceptionSyndrome &&( EL2EnabledException_Uncategorized() && HCR_EL2.TGE == '1');
if route_to_el2 then
exception =);
when 0x3 exception = ExceptionSyndrome(Exception_UncategorizedException_CP15RTTrap););
when 0x4 exception =
AArch64.TakeExceptionExceptionSyndrome(EL2Exception_CP15RRTTrap, exception, preferred_exception_return, vect_offset);
else
exception =);
when 0x5 exception = ExceptionSyndrome(Exception_CP14RTTrap);
when 0x6 exception = ExceptionSyndrome(Exception_CP14DTTrap);
when 0x7 exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
exception.syndrome<24:20> = when 0x8 exception = ExceptionSyndrome(Exception_FPIDTrap);
when 0xC exception = ExceptionSyndrome(Exception_CP14RRTTrap);
otherwise Unreachable();
bits(20) iss = Zeros();
if exception.exceptype IN {Exception_FPIDTrap, Exception_CP14RTTrap, Exception_CP15RTTrap} then
// Trapped MRC/MCR, VMRS on FPSID
if exception.exceptype != Exception_FPIDTrap then // When trap is not for VMRS
iss<19:17> = instr<7:5>; // opc2
iss<16:14> = instr<23:21>; // opc1
iss<13:10> = instr<19:16>; // CRn
iss<4:1> = instr<3:0>; // CRm
else
iss<19:17> = '000';
iss<16:14> = '111';
iss<13:10> = instr<19:16>; // reg
iss<4:1> = '0000';
if instr<20> == '1' && instr<15:12> == '1111' then // MRC, Rt==15
iss<9:5> = '11111';
elsif instr<20> == '0' && instr<15:12> == '1111' then // MCR, Rt==15
iss<9:5> = bits(5) UNKNOWN;
else
iss<9:5> = LookUpRIndex(UInt(instr<15:12>), PSTATE.M)<4:0>;
elsif exception.exceptype IN {Exception_CP14RRTTrap, Exception_AdvSIMDFPAccessTrap, Exception_CP15RRTTrap} then
// Trapped MRRC/MCRR, VMRS/VMSR
iss<19:16> = instr<7:4>; // opc1
if instr<19:16> == '1111' then // Rt2==15
iss<14:10> = bits(5) UNKNOWN;
else
iss<14:10> = LookUpRIndex(UInt(instr<19:16>), PSTATE.M)<4:0>;
if instr<15:12> == '1111' then // Rt==15
iss<9:5> = bits(5) UNKNOWN;
else
iss<9:5> = LookUpRIndex(UInt(instr<15:12>), PSTATE.M)<4:0>;
iss<4:1> = instr<3:0>; // CRm
elsif exception.exceptype == Exception_CP14DTTrap then
// Trapped LDC/STC
iss<19:12> = instr<7:0>; // imm8
iss<4> = instr<23>; // U
iss<2:1> = instr<24,21>; // P,W
if instr<19:16> == '1111' then // Rn==15, LDC(Literal addressing)/STC
iss<9:5> = bits(5) UNKNOWN;
iss<3> = '1';
elsif exception.exceptype == Exception_Uncategorized then
// Trapped for unknown reason
iss<9:5> = LookUpRIndex(UInt(instr<19:16>), PSTATE.M)<4:0>; // Rn
iss<3> = '0';
iss<0> = instr<20>; // Direction
exception.syndrome<24:20> = ConditionSyndrome();
AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);
();
exception.syndrome<19:0> = iss;
return; return exception;
// AArch64.CheckCP15InstrCoarseTraps()
// ===================================
// Check for coarse-grained AArch32 CP15 traps in HSTR_EL2 and HCR_EL2.
boolean// AArch64.AdvSIMDFPAccessTrap()
// =============================
// Trapped access to Advanced SIMD or FP registers due to CPACR[]. AArch64.CheckCP15InstrCoarseTraps(integer CRn, integer nreg, integer CRm)
// Check for coarse-grained Hyp traps
if PSTATE.EL IN {AArch64.AdvSIMDFPAccessTrap(bits(2) target_el)
bits(64) preferred_exception_return =EL0ThisInstrAddr,();
vect_offset = 0x0;
route_to_el2 = (target_el == EL1} &&&& EL2Enabled() then
// Check for MCR, MRC, MCRR and MRRC disabled by HSTR_EL2<CRn/CRm>
major = if nreg == 1 then CRn else CRm;
if !() && HCR_EL2.TGE == '1');
if route_to_el2 then
exception =(Exception_Uncategorized);
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
exception.syndrome<24:20> = ConditionSyndrome();
AArch64.TakeExceptionIsInHostExceptionSyndrome() && !(major IN {4,14}) && HSTR_EL2<major> == '1' then
return TRUE;
(target_el, exception, preferred_exception_return, vect_offset);
// Check for MRC and MCR disabled by HCR_EL2.TIDCP
if (HCR_EL2.TIDCP == '1' && nreg == 1 &&
((CRn == 9 && CRm IN {0,1,2, 5,6,7,8 }) ||
(CRn == 10 && CRm IN {0,1, 4, 8 }) ||
(CRn == 11 && CRm IN {0,1,2,3,4,5,6,7,8,15}))) then
return TRUE;
return FALSE; return;
// AArch64.CheckFPAdvSIMDEnabled()
// ===============================
// Check against CPACR[]// AArch64.CheckCP15InstrCoarseTraps()
// ===================================
// Check for coarse-grained AArch32 CP15 traps in HSTR_EL2 and HCR_EL2.
boolean
AArch64.CheckFPAdvSIMDEnabled()
AArch64.CheckCP15InstrCoarseTraps(integer CRn, integer nreg, integer CRm)
// Check for coarse-grained Hyp traps
if PSTATE.EL IN {EL0, EL1} && !} &&EL2Enabled() then
// Check for MCR, MRC, MCRR and MRRC disabled by HSTR_EL2<CRn/CRm>
major = if nreg == 1 then CRn else CRm;
if !IsInHost() then
// Check if access disabled in CPACR_EL1
case CPACR_EL1.FPEN of
when 'x0' disabled = TRUE;
when '01' disabled = PSTATE.EL == EL0;
when '11' disabled = FALSE;
if disabled then AArch64.AdvSIMDFPAccessTrap(EL1);
AArch64.CheckFPAdvSIMDTrap(); // Also check against CPTR_EL2 and CPTR_EL3() && !(major IN {4,14}) && HSTR_EL2<major> == '1' then
return TRUE;
// Check for MRC and MCR disabled by HCR_EL2.TIDCP
if (HCR_EL2.TIDCP == '1' && nreg == 1 &&
((CRn == 9 && CRm IN {0,1,2, 5,6,7,8 }) ||
(CRn == 10 && CRm IN {0,1, 4, 8 }) ||
(CRn == 11 && CRm IN {0,1,2,3,4,5,6,7,8,15}))) then
return TRUE;
return FALSE;
// AArch64.CheckFPAdvSIMDTrap()
// ============================
// Check against CPTR_EL2 and CPTR_EL3.// AArch64.CheckFPAdvSIMDEnabled()
// ===============================
// Check against CPACR[]
AArch64.CheckFPAdvSIMDTrap()
AArch64.CheckFPAdvSIMDEnabled()
if PSTATE.EL IN {EL0, EL1,} && ! EL2IsInHost} &&() then
// Check if access disabled in CPACR_EL1
case CPACR_EL1.FPEN of
when 'x0' disabled = TRUE;
when '01' disabled = PSTATE.EL == EL2Enabled() then
// Check if access disabled in CPTR_EL2
if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
case CPTR_EL2.FPEN of
when 'x0' disabled = TRUE;
when '01' disabled = PSTATE.EL == EL0 && HCR_EL2.TGE == '1';
when '11' disabled = FALSE;
if disabled then;
when '11' disabled = FALSE;
if disabled then AArch64.AdvSIMDFPAccessTrap(EL2EL1);
else
if CPTR_EL2.TFP == '1' then); AArch64.AdvSIMDFPAccessTrapAArch64.CheckFPAdvSIMDTrap(EL2);
if HaveEL(EL3) then
// Check if access disabled in CPTR_EL3
if CPTR_EL3.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL3);
return;(); // Also check against CPTR_EL2 and CPTR_EL3
// AArch64.CheckForERetTrap()
// ==========================
// Check for trap on ERET, ERETAA, ERETAB instruction// AArch64.CheckFPAdvSIMDTrap()
// ============================
// Check against CPTR_EL2 and CPTR_EL3.
AArch64.CheckForERetTrap(boolean eret_with_pac, boolean pac_uses_key_a)
route_to_el2 = FALSE;
// Non-secure EL1 execution of ERET, ERETAA, ERETAB when either HCR_EL2.NV or HFGITR_EL2.ERET is set,
// is trapped to EL2
route_to_el2 = (PSTATE.EL ==AArch64.CheckFPAdvSIMDTrap()
if PSTATE.EL IN { EL0, EL1 &&, EL2} && EL2Enabled() &&
((() then
// Check if access disabled in CPTR_EL2
ifHaveNVExtHaveVirtHostExt() && HCR_EL2.NV == '1') ||
(() && HCR_EL2.E2H == '1' then
case CPTR_EL2.FPEN of
when 'x0' disabled = TRUE;
when '01' disabled = PSTATE.EL ==HaveFGTExtEL0() && HCR_EL2.<E2H, TGE> != '11' &&
(!&& HCR_EL2.TGE == '1';
when '11' disabled = FALSE;
if disabled thenHaveELAArch64.AdvSIMDFPAccessTrap(EL3EL2) || SCR_EL3.FGTEn == '1') && HFGITR_EL2.ERET == '1')));
if route_to_el2 then);
else
if CPTR_EL2.TFP == '1' then
ExceptionRecordAArch64.AdvSIMDFPAccessTrap exception;
bits(64) preferred_exception_return =( ThisInstrAddrEL2();
vect_offset = 0x0;
exception =);
if ExceptionSyndromeHaveEL(Exception_ERetTrapEL3);
if !eret_with_pac then // ERET
exception.syndrome<1> = '0';
exception.syndrome<0> = '0'; // RES0
else
exception.syndrome<1> = '1';
if pac_uses_key_a then // ERETAA
exception.syndrome<0> = '0';
else // ERETAB
exception.syndrome<0> = '1';) then
// Check if access disabled in CPTR_EL3
if CPTR_EL3.TFP == '1' then
AArch64.TakeExceptionAArch64.AdvSIMDFPAccessTrap(EL2EL3, exception, preferred_exception_return, vect_offset););
return;
// AArch64.CheckForSMCUndefOrTrap()
// ================================
// Check for UNDEFINED or trap on SMC instruction// AArch64.CheckForERetTrap()
// ==========================
// Check for trap on ERET, ERETAA, ERETAB instruction
AArch64.CheckForSMCUndefOrTrap(bits(16) imm)
if PSTATE.EL ==AArch64.CheckForERetTrap(boolean eret_with_pac, boolean pac_uses_key_a)
route_to_el2 = FALSE;
// Non-secure EL1 execution of ERET, ERETAA, ERETAB when either HCR_EL2.NV or HFGITR_EL2.ERET is set,
// is trapped to EL2
route_to_el2 = (PSTATE.EL == EL0 then UNDEFINED;
if (!(PSTATE.EL == EL1 && EL2Enabled() && HCR_EL2.TSC == '1') &&() &&
((
HaveELHaveNVExt(() && HCR_EL2.NV == '1') ||
(EL3HaveFGTExt) && SCR_EL3.SMD == '1') then
UNDEFINED;
route_to_el2 = FALSE;
if !() && HCR_EL2.<E2H, TGE> != '11' &&
(!HaveEL(EL3) then
if PSTATE.EL ==) || SCR_EL3.FGTEn == '1') && HFGITR_EL2.ERET == '1')));
if route_to_el2 then EL1ExceptionRecord &&exception;
bits(64) preferred_exception_return = EL2Enabled() then
if HaveNVExt() && HCR_EL2.NV == '1' && HCR_EL2.TSC == '1' then
route_to_el2 = TRUE;
else
UNDEFINED;
else
UNDEFINED;
else
route_to_el2 = PSTATE.EL == EL1 && EL2Enabled() && HCR_EL2.TSC == '1';
if route_to_el2 then
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x0;
exception = ExceptionSyndrome(Exception_MonitorCallException_ERetTrap);
exception.syndrome<15:0> = imm; if !eret_with_pac then // ERET
exception.syndrome<1> = '0';
exception.syndrome<0> = '0'; // RES0
else
exception.syndrome<1> = '1';
if pac_uses_key_a then // ERETAA
exception.syndrome<0> = '0';
else // ERETAB
exception.syndrome<0> = '1';
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
// AArch64.CheckForSVCTrap()
// =========================
// Check for trap on SVC instruction// AArch64.CheckForSMCUndefOrTrap()
// ================================
// Check for UNDEFINED or trap on SMC instruction
AArch64.CheckForSVCTrap(bits(16) immediate)
ifAArch64.CheckForSMCUndefOrTrap(bits(16) imm)
if PSTATE.EL == HaveFGTExt() then
route_to_el2 = FALSE;
if PSTATE.EL == EL0 then
route_to_el2 = (!then UNDEFINED;
if (!(PSTATE.EL ==ELUsingAArch32(EL0) && !ELUsingAArch32(EL1) &&&& EL2Enabled() && HFGITR_EL2.SVC_EL0 == '1' &&
(HCR_EL2.<E2H, TGE> != '11' && (!() && HCR_EL2.TSC == '1') &&HaveEL(EL3) || SCR_EL3.FGTEn == '1')));
elsif PSTATE.EL ==) && SCR_EL3.SMD == '1') then
UNDEFINED;
route_to_el2 = FALSE;
if ! HaveEL(EL3) then
if PSTATE.EL == EL1 then
route_to_el2 = (!&&ELUsingAArch32EL2Enabled(() then
ifHaveNVExt() && HCR_EL2.NV == '1' && HCR_EL2.TSC == '1' then
route_to_el2 = TRUE;
else
UNDEFINED;
else
UNDEFINED;
else
route_to_el2 = PSTATE.EL == EL1) &&&& EL2Enabled() && HFGITR_EL2.SVC_EL1 == '1' &&
(HCR_EL2.<E2H, TGE> != '11' && (!() && HCR_EL2.TSC == '1';
if route_to_el2 then
bits(64) preferred_exception_return =HaveELThisInstrAddr(();
vect_offset = 0x0;
exception =EL3) || SCR_EL3.FGTEn == '1')));
if route_to_el2 then
exception = ExceptionSyndrome(Exception_SupervisorCallException_MonitorCall);
exception.syndrome<15:0> = immediate;
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x0;);
exception.syndrome<15:0> = imm;
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
// AArch64.CheckForWFxTrap()
// AArch64.CheckForSVCTrap()
// =========================
// Check for trap on WFE or WFI instruction// Check for trap on SVC instruction
AArch64.CheckForWFxTrap(bits(2) target_el,AArch64.CheckForSVCTrap(bits(16) immediate)
if WFxTypeHaveFGTExt wfxtype)
assert() then
route_to_el2 = FALSE;
if PSTATE.EL == EL0 then
route_to_el2 = (!ELUsingAArch32(EL0) && !ELUsingAArch32(EL1) && EL2Enabled() && HFGITR_EL2.SVC_EL0 == '1' &&
(HCR_EL2.<E2H, TGE> != '11' && (!HaveEL(target_el);
boolean is_wfe = wfxtype IN {(WFxType_WFEEL3,) || SCR_EL3.FGTEn == '1')));
elsif PSTATE.EL == WFxType_WFETEL1};
case target_el of
whenthen
route_to_el2 = (! ELUsingAArch32(EL1
trap = (if is_wfe then) && SCTLREL2Enabled[].nTWE else() && HFGITR_EL2.SVC_EL1 == '1' &&
(HCR_EL2.<E2H, TGE> != '11' && (! SCTLRHaveEL[].nTWI) == '0';
when( EL2
trap = (if is_wfe then HCR_EL2.TWE else HCR_EL2.TWI) == '1';
when EL3
trap = (if is_wfe then SCR_EL3.TWE else SCR_EL3.TWI) == '1';
) || SCR_EL3.FGTEn == '1')));
if trap then if route_to_el2 then
exception =
(Exception_SupervisorCall);
exception.syndrome<15:0> = immediate;
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x0;
AArch64.TakeException(EL2AArch64.WFxTrapExceptionSyndrome(wfxtype, target_el);, exception, preferred_exception_return, vect_offset);
// AArch64.CheckIllegalState()
// ===========================
// Check PSTATE.IL bit and generate Illegal Execution state exception if set.// AArch64.CheckForWFxTrap()
// =========================
// Check for trap on WFE or WFI instruction
AArch64.CheckIllegalState()
if PSTATE.IL == '1' then
route_to_el2 = PSTATE.EL ==AArch64.CheckForWFxTrap(bits(2) target_el, boolean is_wfe)
assert EL0HaveEL &&(target_el);
case target_el of
when EL2Enabled() && HCR_EL2.TGE == '1';
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x0;
exception = ExceptionSyndrome(Exception_IllegalState);
if UInt(PSTATE.EL) > UInt(EL1) thentrap = (if is_wfe then
AArch64.TakeExceptionSCTLR(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif route_to_el2 then[].nTWE else
AArch64.TakeExceptionSCTLR([].nTWI) == '0';
whenEL2, exception, preferred_exception_return, vect_offset);
elsetrap = (if is_wfe then HCR_EL2.TWE else HCR_EL2.TWI) == '1';
when
AArch64.TakeExceptionEL3(trap = (if is_wfe then SCR_EL3.TWE else SCR_EL3.TWI) == '1';
if trap thenEL1AArch64.WFxTrap, exception, preferred_exception_return, vect_offset);(target_el, is_wfe);
// AArch64.MonitorModeTrap()
// =========================
// Trapped use of Monitor mode features in a Secure EL1 AArch32 mode// AArch64.CheckIllegalState()
// ===========================
// Check PSTATE.IL bit and generate Illegal Execution state exception if set.
AArch64.MonitorModeTrap()
bits(64) preferred_exception_return =AArch64.CheckIllegalState()
if PSTATE.IL == '1' then
route_to_el2 = PSTATE.EL == EL0 && EL2Enabled() && HCR_EL2.TGE == '1';
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x0;
exception = ExceptionSyndrome(Exception_UncategorizedException_IllegalState);
if IsSecureEL2EnabledUInt() then(PSTATE.EL) >
UInt(EL1) then
AArch64.TakeException((PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif route_to_el2 thenAArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL3EL1, exception, preferred_exception_return, vect_offset);
// AArch64.SystemAccessTrap()
// ==========================
// Trapped access to AArch64 system register or system instruction.// AArch64.MonitorModeTrap()
// =========================
// Trapped use of Monitor mode features in a Secure EL1 AArch32 mode
AArch64.SystemAccessTrap(bits(2) target_el, integer ec)
assertAArch64.MonitorModeTrap()
bits(64) preferred_exception_return = HaveELThisInstrAddr(target_el) && target_el !=();
vect_offset = 0x0;
exception = EL0ExceptionSyndrome &&( UIntException_Uncategorized(target_el) >=);
if UIntIsSecureEL2Enabled(PSTATE.EL);
bits(64) preferred_exception_return =() then ThisInstrAddrAArch64.TakeException();
vect_offset = 0x0;
exception =( AArch64.SystemAccessTrapSyndromeEL2(, exception, preferred_exception_return, vect_offset);ThisInstrAArch64.TakeException(), ec);(
AArch64.TakeExceptionEL3(target_el, exception, preferred_exception_return, vect_offset);, exception, preferred_exception_return, vect_offset);
// AArch64.SystemAccessTrapSyndrome()
// ==================================
// Returns the syndrome information for traps on AArch64 MSR/MRS instructions.
ExceptionRecord// AArch64.SystemAccessTrap()
// ==========================
// Trapped access to AArch64 system register or system instruction. AArch64.SystemAccessTrapSyndrome(bits(32) instr, integer ec)AArch64.SystemAccessTrap(bits(2) target_el, integer ec)
assert
ExceptionRecordHaveEL exception;
case ec of
when 0x0 // Trapped access due to unknown reason.
exception =(target_el) && target_el != ExceptionSyndromeEL0(&&Exception_UncategorizedUInt);
when 0x7 // Trapped access to SVE, Advance SIMD&FP system register.
exception =(target_el) >= ExceptionSyndromeUInt((PSTATE.EL);
bits(64) preferred_exception_return =Exception_AdvSIMDFPAccessTrapThisInstrAddr);
exception.syndrome<24:20> =();
vect_offset = 0x0;
exception = ConditionSyndromeAArch64.SystemAccessTrapSyndrome();
when 0x18 // Trapped access to system register or system instruction.
exception =( ExceptionSyndrome(Exception_SystemRegisterTrap);
instr = ThisInstr();
exception.syndrome<21:20> = instr<20:19>; // Op0
exception.syndrome<19:17> = instr<7:5>; // Op2
exception.syndrome<16:14> = instr<18:16>; // Op1
exception.syndrome<13:10> = instr<15:12>; // CRn
exception.syndrome<9:5> = instr<4:0>; // Rt
exception.syndrome<4:1> = instr<11:8>; // CRm
exception.syndrome<0> = instr<21>; // Direction
when 0x19 // Trapped access to SVE System register
exception =(), ec); ExceptionSyndromeAArch64.TakeException(Exception_SVEAccessTrap);
otherwise
Unreachable();
return exception;(target_el, exception, preferred_exception_return, vect_offset);
// AArch64.UndefinedFault()
// ========================// AArch64.SystemAccessTrapSyndrome()
// ==================================
// Returns the syndrome information for traps on AArch64 MSR/MRS instructions.
ExceptionRecord
AArch64.UndefinedFault()
route_to_el2 = PSTATE.EL ==AArch64.SystemAccessTrapSyndrome(bits(32) instr, integer ec) EL0ExceptionRecord &&exception;
case ec of
when 0x0 // Trapped access due to unknown reason.
exception = EL2EnabledExceptionSyndrome() && HCR_EL2.TGE == '1';
bits(64) preferred_exception_return =( ThisInstrAddrException_Uncategorized();
vect_offset = 0x0;
exception =);
when 0x7 // Trapped access to SVE, Advance SIMD&FP system register.
exception = ExceptionSyndrome(Exception_UncategorizedException_AdvSIMDFPAccessTrap);
if exception.syndrome<24:20> = UIntConditionSyndrome(PSTATE.EL) >();
when 0x18 // Trapped access to system register or system instruction.
exception = UIntExceptionSyndrome(EL1Exception_SystemRegisterTrap) then);
instr =
AArch64.TakeExceptionThisInstr(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif route_to_el2 then();
exception.syndrome<21:20> = instr<20:19>; // Op0
exception.syndrome<19:17> = instr<7:5>; // Op2
exception.syndrome<16:14> = instr<18:16>; // Op1
exception.syndrome<13:10> = instr<15:12>; // CRn
exception.syndrome<9:5> = instr<4:0>; // Rt
exception.syndrome<4:1> = instr<11:8>; // CRm
exception.syndrome<0> = instr<21>; // Direction
when 0x19 // Trapped access to SVE System register
exception =
AArch64.TakeExceptionExceptionSyndrome(EL2Exception_SVEAccessTrap, exception, preferred_exception_return, vect_offset);
else);
otherwise
AArch64.TakeExceptionUnreachable(EL1, exception, preferred_exception_return, vect_offset);();
return exception;
// AArch64.WFxTrap()
// =================// AArch64.UndefinedFault()
// ========================
AArch64.WFxTrap(AArch64.UndefinedFault()
route_to_el2 = PSTATE.EL ==WFxTypeEL0 wfxtype, bits(2) target_el)
assert&& UIntEL2Enabled(target_el) >() && HCR_EL2.TGE == '1';
bits(64) preferred_exception_return = UInt(PSTATE.EL);
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x0;
exception = ExceptionSyndrome(Exception_WFxTrapException_Uncategorized);
exception.syndrome<24:20> =
if ConditionSyndromeUInt();
case wfxtype of
when(PSTATE.EL) > WFxType_WFIUInt
exception.syndrome<1:0> = '00';
when( WFxType_WFE
exception.syndrome<1:0> = '01';
when WFxType_WFIT
exception.syndrome<1:0> = '10';
when WFxType_WFET
exception.syndrome<1:0> = '11';
if target_el == EL1 &&) then EL2EnabledAArch64.TakeException() && HCR_EL2.TGE == '1' then(PSTATE.EL, exception, preferred_exception_return, vect_offset);
elsif route_to_el2 then
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeException(EL1(target_el, exception, preferred_exception_return, vect_offset);, exception, preferred_exception_return, vect_offset);
// CheckFPAdvSIMDEnabled64()
// =========================
// AArch64 instruction wrapper// AArch64.WFxTrap()
// =================
CheckFPAdvSIMDEnabled64()AArch64.WFxTrap(bits(2) target_el, boolean is_wfe)
assert
(target_el) > UInt(PSTATE.EL);
bits(64) preferred_exception_return = ThisInstrAddr();
vect_offset = 0x0;
exception = ExceptionSyndrome(Exception_WFxTrap);
exception.syndrome<24:20> = ConditionSyndrome();
exception.syndrome<0> = if is_wfe then '1' else '0';
if target_el == EL1 && EL2Enabled() && HCR_EL2.TGE == '1' then
AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
else
AArch64.TakeExceptionAArch64.CheckFPAdvSIMDEnabledUInt();(target_el, exception, preferred_exception_return, vect_offset);
// CheckLDST64BEnabled()
// =====================
// Checks for trap on ST64B and LD64B instructions// CheckFPAdvSIMDEnabled64()
// =========================
// AArch64 instruction wrapper
CheckLDST64BEnabled()
boolean trap = FALSE;
bits(25) iss =CheckFPAdvSIMDEnabled64() ZeroExtendAArch64.CheckFPAdvSIMDEnabled('10'); // 0x2
if PSTATE.EL == EL0 then
if !IsInHost() then
trap = SCTLR_EL1.EnALS == '0';
target_el = if HCR_EL2.TGE == '1' then EL2 else EL1;
else
trap = SCTLR_EL2.EnALS == '0';
target_el = EL2;
if (!trap && EL2Enabled() && HaveFeatHCX() &&
((PSTATE.EL == EL0 && !IsInHost()) || PSTATE.EL == EL1)) then
trap = !IsHCRXEL2Enabled() || HCRX_EL2.EnALS == '0';
target_el = EL2;
if trap then LDST64BTrap(target_el, iss);();
// CheckST64BV0Enabled()
// CheckLDST64BEnabled()
// =====================
// Checks for trap on ST64BV0 instruction// Checks for trap on ST64B and LD64B instructions
CheckST64BV0Enabled()
CheckLDST64BEnabled()
boolean trap = FALSE;
bits(25) iss = ZeroExtend('1'); // 0x1
('10'); // 0x2
if PSTATE.EL == EL0 then
if !IsInHost() then
trap = SCTLR_EL1.EnAS0 == '0';
trap = SCTLR_EL1.EnALS == '0';
target_el = if HCR_EL2.TGE == '1' then EL2 else EL1;
else
trap = SCTLR_EL2.EnAS0 == '0';
trap = SCTLR_EL2.EnALS == '0';
target_el = EL2;
if (!trap && if !trap && ((PSTATE.EL == EL2Enabled() && HaveFeatHCX() &&
((PSTATE.EL == EL0 && !IsInHost()) || PSTATE.EL == EL1)) then
trap = !) then
trap =IsHCRXEL2EnabledEL2Enabled() || HCRX_EL2.EnAS0 == '0';
() && HCRX_EL2.EnALS == '0';
target_el = EL2;
if !trap && PSTATE.EL != EL3 then
trap = HaveEL(EL3) && SCR_EL3.EnAS0 == '0';
target_el = EL3;
if trap then LDST64BTrap(target_el, iss);
// CheckST64BVEnabled()
// ====================
// Checks for trap on ST64BV instruction// CheckST64BV0Enabled()
// =====================
// Checks for trap on ST64BV0 instruction
CheckST64BVEnabled()
CheckST64BV0Enabled()
boolean trap = FALSE;
bits(25) iss = ZerosZeroExtend();
('1'); // 0x1
if PSTATE.EL == EL0 then
if !IsInHost() then
trap = SCTLR_EL1.EnASR == '0';
trap = SCTLR_EL1.EnAS0 == '0';
target_el = if HCR_EL2.TGE == '1' then EL2 else EL1;
else
trap = SCTLR_EL2.EnASR == '0';
trap = SCTLR_EL2.EnAS0 == '0';
target_el = EL2;
if (!trap && if !trap && ((PSTATE.EL == EL0 && !IsInHost()) || PSTATE.EL == EL1) then
trap = EL2Enabled() &&() && HCRX_EL2.EnAS0 == '0';
target_el = HaveFeatHCXEL2() &&
((PSTATE.EL ==;
if !trap && ((PSTATE.EL IN { EL0 && !,IsInHost()) || PSTATE.EL == EL1)) then
trap = !}) || PSTATE.EL ==IsHCRXEL2EnabledEL2() || HCRX_EL2.EnASR == '0';
target_el =) then
trap = (EL3) && SCR_EL3.EnAS0 == '0';
target_el = EL3EL2HaveEL;
if trap then LDST64BTrap(target_el, iss);
// LDST64BTrap()
// =============
// Trapped access to LD64B, ST64B, ST64BV and ST64BV0 instructions// CheckST64BVEnabled()
// ====================
// Checks for trap on ST64BV instruction
LDST64BTrap(bits(2) target_el, bits(25) iss)
bits(64) preferred_exception_return =CheckST64BVEnabled()
boolean trap = FALSE;
bits(25) iss = ThisInstrAddrZeros();
vect_offset = 0x0;
exception = if PSTATE.EL == ExceptionSyndromeEL0(then
if !Exception_LDST64BTrapIsInHost);
exception.syndrome = iss;() then
trap = SCTLR_EL1.EnASR == '0';
target_el = if HCR_EL2.TGE == '1' then
else EL1;
else
trap = SCTLR_EL2.EnASR == '0';
target_el = EL2;
if !trap && ((PSTATE.EL == EL0 && !IsInHost()) || PSTATE.EL == EL1) then
trap = EL2Enabled() && HCRX_EL2.EnASR == '0';
target_el = EL2;
if trap then LDST64BTrapAArch64.TakeExceptionEL2(target_el, exception, preferred_exception_return, vect_offset);
return;(target_el, iss);
// WFETrapDelay()
// ==============
// Returns TRUE when delay in trap to WFE is enabled with value to amount of delay,
// FALSE otherwise.
(boolean, integer)// LDST64BTrap()
// =============
// Trapped access to LD64B, ST64B, ST64BV and ST64BV0 instructions WFETrapDelay(bits(2) target_el)
case target_el of
whenLDST64BTrap(bits(2) target_el, bits(25) iss)
bits(64) preferred_exception_return = EL1ThisInstrAddr
if !();
vect_offset = 0x0;
exception =IsInHostExceptionSyndrome() then
delay_enabled = SCTLR_EL1.TWEDEn == '1';
delay = 1 << ((UIntException_LDST64BTrap(SCTLR_EL1.TWEDEL) + 8);
else
delay_enabled = SCTLR_EL2.TWEDEn == '1';
delay = 1 << ();
exception.syndrome = iss;UIntAArch64.TakeException(SCTLR_EL2.TWEDEL) + 8);
when EL2
delay_enabled = HCR_EL2.TWEDEn == '1';
delay = 1 << (UInt(HCR_EL2.TWEDEL) + 8);
when EL3
delay_enabled = SCR_EL3.TWEDEn == '1';
delay = 1 << (UInt(SCR_EL3.TWEDEL) + 8);
(target_el, exception, preferred_exception_return, vect_offset);
return (delay_enabled, delay); return;
// WaitForEventUntilDelay()
// ========================
// Returns TRUE if WaitForEvent() returns before WFE trap delay expires,
// WFETrapDelay()
// ==============
// Returns TRUE when delay in trap to WFE is enabled with value to amount of delay,
// FALSE otherwise.
boolean(boolean, integer) WaitForEventUntilDelay(boolean delay_enabled, integer delay)
boolean eventarrived = FALSE;
// set eventarrived to TRUE if WaitForEvent() returns before
// 'delay' expires when delay_enabled is TRUE.
return eventarrived;WFETrapDelay(bits(2) target_el)
case target_el of
whenEL1
if !IsInHost() then
delay_enabled = SCTLR_EL1.TWEDEn == '1';
delay = 1 << (UInt(SCTLR_EL1.TWEDEL) + 8);
else
delay_enabled = SCTLR_EL2.TWEDEn == '1';
delay = 1 << (UInt(SCTLR_EL2.TWEDEL) + 8);
when EL2
delay_enabled = HCR_EL2.TWEDEn == '1';
delay = 1 << (UInt(HCR_EL2.TWEDEL) + 8);
when EL3
delay_enabled = SCR_EL3.TWEDEn == '1';
delay = 1 << (UInt(SCR_EL3.TWEDEL) + 8);
return (delay_enabled, delay);
// AArch64.CreateFaultRecord()
// ===========================
// WaitForEventUntilDelay()
// ========================
// Returns TRUE if WaitForEvent() returns before WFE trap delay expires,
// FALSE otherwise.
FaultRecordboolean AArch64.CreateFaultRecord(WaitForEventUntilDelay(boolean delay_enabled, integer delay)
boolean eventarrived = FALSE;
// set eventarrived to TRUE if WaitForEvent() returns before
// 'delay' expires when delay_enabled is TRUE.
return eventarrived;Fault statuscode, bits(52) ipaddress, boolean NS,
integer level, AccType acctype, boolean write, bit extflag,
bits(2) errortype, boolean secondstage, boolean s2fs1walk)
FaultRecord fault;
fault.statuscode = statuscode;
fault.domain = bits(4) UNKNOWN; // Not used from AArch64
fault.debugmoe = bits(4) UNKNOWN; // Not used from AArch64
fault.errortype = errortype;
fault.ipaddress.NS = if NS then '1' else '0';
fault.ipaddress.address = ipaddress;
fault.level = level;
fault.acctype = acctype;
fault.write = write;
fault.extflag = extflag;
fault.secondstage = secondstage;
fault.s2fs1walk = s2fs1walk;
return fault;
// AArch64.FaultSyndrome()
// =======================
// Creates an exception syndrome value for Abort and Watchpoint exceptions taken to
// an Exception Level using AArch64.
// AArch64.CreateFaultRecord()
// ===========================
(bits(25), bits(5))FaultRecord AArch64.FaultSyndrome(boolean d_side,AArch64.CreateFaultRecord( FaultRecordFault fault)
assert fault.statuscode !=statuscode, bits(52) ipaddress, boolean NS,
integer level, Fault_NoneAccType;
bits(25) iss =acctype, boolean write, bit extflag,
bits(2) errortype, boolean secondstage, boolean s2fs1walk) ZerosFaultRecord();
bits(5) iss2 = Zeros();
if !HaveFeatLS64() && HaveRASExt() && IsAsyncAbort(fault) then
iss<12:11> = fault.errortype; // SET
if d_side then
if HaveFeatLS64() && fault.acctype == AccType_ATOMICLS64 then
if (fault.statuscode IN {Fault_AccessFlag,
Fault_Translation, Fault_Permission}) then
(iss2, iss<24:14>, iss<12:11>) = LS64InstructionSyndrome();
else
if (IsSecondStage(fault) && !fault.s2fs1walk &&
(!IsExternalSyncAbort(fault) ||
(!HaveRASExt() && fault.acctype == AccType_TTW &&
boolean IMPLEMENTATION_DEFINED "ISV on second stage translation table walk"))) then
iss<24:14> = LSInstructionSyndrome();
if HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER then
iss<13> = '1'; // Fault is generated by use of VNCR_EL2
if fault.acctype IN {AccType_DC, AccType_IC, AccType_AT, AccType_ATPAN} then
iss<8> = '1'; iss<6> = '1';
else
iss<6> = if fault.write then '1' else '0';
if IsExternalAbort(fault) then iss<9> = fault.extflag;
iss<7> = if fault.s2fs1walk then '1' else '0';
iss<5:0> = EncodeLDFSC(fault.statuscode, fault.level);
fault;
fault.statuscode = statuscode;
fault.domain = bits(4) UNKNOWN; // Not used from AArch64
fault.debugmoe = bits(4) UNKNOWN; // Not used from AArch64
fault.errortype = errortype;
fault.ipaddress.NS = if NS then '1' else '0';
fault.ipaddress.address = ipaddress;
fault.level = level;
fault.acctype = acctype;
fault.write = write;
fault.extflag = extflag;
fault.secondstage = secondstage;
fault.s2fs1walk = s2fs1walk;
return (iss, iss2); return fault;
// Returns the syndrome information and LST for a Data Abort by a
// ST64B, ST64BV, ST64BV0, or LD64B instruction. The syndrome information
// includes the ISS2, extended syndrome field, and LST.
(bits(5), bits(11), bits(2))// AArch64.FaultSyndrome()
// =======================
// Creates an exception syndrome value for Abort and Watchpoint exceptions taken to
// an Exception Level using AArch64.
(bits(25), bits(5)) LS64InstructionSyndrome();AArch64.FaultSyndrome(boolean d_side,FaultRecord fault)
assert fault.statuscode != Fault_None;
bits(25) iss = Zeros();
bits(5) iss2 = Zeros();
if (HaveFeatLS64() && fault.acctype == AccType_ATOMICLS64 &&
fault.statuscode IN {Fault_AccessFlag, Fault_Translation, Fault_Permission}) then
iss2 = AArch64.RegisterSpecifier();
if HaveRASExt() && IsAsyncAbort(fault) then
if (HaveFeatLS64() && fault.acctype == AccType_ATOMICLS64 &&
fault.statuscode IN {Fault_AccessFlag, Fault_Translation, Fault_Permission}) then
iss<12:11> = '01'; // LST
else
iss<12:11> = fault.errortype; // SET
if d_side then
if ( IsSecondStage(fault) && !fault.s2fs1walk && (!IsExternalSyncAbort(fault) ||
(!HaveRASExt() && fault.acctype == AccType_TTW &&
boolean IMPLEMENTATION_DEFINED "ISV on second stage translation table walk")) ) then
iss<24:14> = LSInstructionSyndrome();
if HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER then
iss<13> = '1'; // Fault is generated by use of VNCR_EL2
if fault.acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_IC, AccType_AT} then
iss<8> = '1'; iss<6> = '1';
else
iss<6> = if fault.write then '1' else '0';
if IsExternalAbort(fault) then iss<9> = fault.extflag;
iss<7> = if fault.s2fs1walk then '1' else '0';
iss<5:0> = EncodeLDFSC(fault.statuscode, fault.level);
return (iss, iss2);
// AArch64.ExclusiveMonitorsPass()
// ===============================
// Return TRUE if the Exclusives monitors for the current PE include all of the addresses
// associated with the virtual address region of size bytes starting at address.
// The immediately following memory write must be to the same addresses.

boolean AArch64.ExclusiveMonitorsPass(bits(64) address, integer size)

    // It is IMPLEMENTATION DEFINED whether the detection of memory aborts happens
    // before or after the check on the local Exclusives monitor. As a result a failure
    // of the local monitor can occur on some implementations even if the memory
    // access would give an memory abort.

    acctype = AccType_ATOMIC;
    iswrite = TRUE;

    // Alignment is checked first; AArch64.CheckAlignment may raise an Alignment
    // fault before any monitor state is consulted.
    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);

    // Optional IMPLEMENTATION DEFINED virtual-address check; always safe for an
    // implementation to pass this and rely on the physical-address checks below.
    passed = AArch64.IsExclusiveVA(address, ProcessorID(), size);
    if !passed then
        return FALSE;

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // The local monitor is checked and then cleared regardless of the outcome.
    passed = IsExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);
    ClearExclusiveLocal(ProcessorID());

    // The global monitor is only consulted for Shareable memory.
    if passed then
        if memaddrdesc.memattrs.shareable then
            passed = IsExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);

    return passed;
// AArch64.IsExclusiveVA()
// =======================
// An optional IMPLEMENTATION DEFINED test for an exclusive access to a virtual
// address region of size bytes starting at address.
//
// It is permitted (but not required) for this function to return FALSE and
// cause a store exclusive to fail if the virtual address region is not
// totally included within the region recorded by MarkExclusiveVA().
//
// It is always safe to return TRUE which will check the physical address only.
// (Declaration only: the body is IMPLEMENTATION DEFINED and not given here.)

boolean AArch64.IsExclusiveVA(bits(64) address, integer processorid, integer size);
// AArch64.MarkExclusiveVA()
// =========================
// Optionally record an exclusive access to the virtual address region of size bytes
// starting at address for processorid.
// (Declaration only: the body is IMPLEMENTATION DEFINED and not given here;
// the recorded region is what AArch64.IsExclusiveVA() may later test against.)

AArch64.MarkExclusiveVA(bits(64) address, integer processorid, integer size);
// AArch64.SetExclusiveMonitors()
// ==============================
// Sets the Exclusives monitors for the current PE to record the addresses associated
// with the virtual address region of size bytes starting at address.

AArch64.SetExclusiveMonitors(bits(64) address, integer size)

    acctype = AccType_ATOMIC;
    iswrite = FALSE;

    // Alignment is checked before translation; may raise an Alignment fault.
    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions.
    // If translation faults, no monitor state is set and the fault is not taken here.
    if IsFault(memaddrdesc) then
        return;

    // The global monitor is only marked for Shareable memory; the local monitor
    // and the optional virtual-address record are always marked.
    if memaddrdesc.memattrs.shareable then
        MarkExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);

    MarkExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);

    AArch64.MarkExclusiveVA(address, ProcessorID(), size);
// FPRSqrtStepFused()
// ==================
// Fused reciprocal-square-root step: after negating op1, computes
// (3.0 + (op1 * op2)) / 2.0 as a single fused operation with one final
// rounding, handling NaN, infinity and zero operands specially.

bits(N) FPRSqrtStepFused(bits(N) op1, bits(N) op2)
    assert N IN {16, 32, 64};
    bits(N) result;
    FPCRType fpcr = FPCR[];
    op1 = FPNeg(op1);

    // Alternative floating-point behavior (FPCR.AH == '1') suppresses FP
    // exceptions, forces flush-to-zero of denormals, and forces RNE rounding.
    boolean altfp = HaveAltFP() && fpcr.AH == '1';
    boolean fpexc = !altfp;                        // Generate no floating-point exceptions
    if altfp then fpcr.<FIZ,FZ> = '11';            // Flush denormal input and output to zero
    if altfp then fpcr.RMode = '00';               // Use RNE rounding mode

    (type1,sign1,value1) = FPUnpack(op1, fpcr, fpexc);
    (type2,sign2,value2) = FPUnpack(op2, fpcr, fpexc);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr, FALSE, fpexc);
    FPRounding rounding = FPRoundingMode(fpcr);
    if !done then
        inf1 = (type1 == FPType_Infinity);
        inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);
        zero2 = (type2 == FPType_Zero);
        if (inf1 && zero2) || (zero1 && inf2) then
            // 0 * inf would be NaN in the product; the fused step instead
            // yields the exact result +1.5.
            result = FPOnePointFive('0');
        elsif inf1 || inf2 then
            result = FPInfinity(sign1 EOR sign2);
        else
            // Fully fused multiply-add and halve
            result_value = (3.0 + (value1 * value2)) / 2.0;
            if result_value == 0.0 then
                // Sign of exact zero result depends on rounding mode
                sign = if rounding == FPRounding_NEGINF then '1' else '0';
                result = FPZero(sign);
            else
                result = FPRound(result_value, fpcr, rounding, fpexc);
    return result;
// FPRecipStepFused()
// ==================
// Computes one Newton-Raphson step for reciprocal estimation:
// 2.0 + (-op1 * op2), with the multiply-add performed fully fused.

bits(N) FPRecipStepFused(bits(N) op1, bits(N) op2)
    assert N IN {16, 32, 64};
    bits(N) result;
    FPCRType fpcr = FPCR[];
    op1 = FPNeg(op1);
    boolean altfp = HaveAltFP() && fpcr.AH == '1';
    boolean fpexc = !altfp;              // Generate no floating-point exceptions
    if altfp then fpcr.<FIZ,FZ> = '11';  // Flush denormal input and output to zero
    if altfp then fpcr.RMode    = '00';  // Use RNE rounding mode

    (type1,sign1,value1) = FPUnpack(op1, fpcr, fpexc);
    (type2,sign2,value2) = FPUnpack(op2, fpcr, fpexc);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr, FALSE, fpexc);
    FPRounding rounding = FPRoundingMode(fpcr);

    if !done then
        inf1  = (type1 == FPType_Infinity);
        inf2  = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);
        zero2 = (type2 == FPType_Zero);
        if (inf1 && zero2) || (zero1 && inf2) then
            result = FPTwo('0');
        elsif inf1 || inf2 then
            result = FPInfinity(sign1 EOR sign2);
        else
            // Fully fused multiply-add
            result_value = 2.0 + (value1 * value2);
            if result_value == 0.0 then
                // Sign of exact zero result depends on rounding mode
                sign = if rounding == FPRounding_NEGINF then '1' else '0';
                result = FPZero(sign);
            else
                result = FPRound(result_value, fpcr, rounding, fpexc);
    return result;
// AArch64.AccessIsTagChecked()
// ============================
// TRUE if a given access is tag-checked, FALSE otherwise.
// Each early return below names one condition that exempts the access from
// MTE tag checking.

boolean AArch64.AccessIsTagChecked(bits(64) vaddr, AccType acctype)
    // AArch32 execution is never tag-checked.
    if PSTATE.M<4> == '1' then return FALSE;

    if EffectiveTBI(vaddr, FALSE, PSTATE.EL) == '0' then return FALSE;

    // TCMA exempts addresses whose logical tag is all-zeros or all-ones.
    if EffectiveTCMA(vaddr, PSTATE.EL) == '1' && (vaddr<59:55> == '00000' || vaddr<59:55> == '11111') then
        return FALSE;

    if !AArch64.AllocationTagAccessIsEnabled(acctype) then return FALSE;

    // Instruction fetches and translation table walks are not checked.
    if acctype IN {AccType_IFETCH, AccType_TTW} then return FALSE;

    if acctype == AccType_NV2REGISTER then return FALSE;

    // PSTATE.TCO temporarily disables tag checking.
    if PSTATE.TCO=='1' then return FALSE;

    if !IsTagCheckedInstruction() then return FALSE;

    return TRUE;
// AArch64.AddressWithAllocationTag()
// ==================================
// Generate a 64-bit value containing a Logical Address Tag from a 64-bit
// virtual address and an Allocation Tag.
// If the extension is disabled, treats the Allocation Tag as '0000'.

bits(64) AArch64.AddressWithAllocationTag(bits(64) address, AccType acctype, bits(4) allocation_tag)
    bits(64) result = address;
    bits(4) tag;
    if AArch64.AllocationTagAccessIsEnabled(acctype) then
        tag = allocation_tag;
    else
        tag = '0000';
    // The Logical Address Tag occupies bits [59:56] of the virtual address.
    result<59:56> = tag;
    return result;
// AArch64.AllocationTagFromAddress()
// ==================================
// Generate an Allocation Tag from a 64-bit value containing a Logical Address Tag.

bits(4) AArch64.AllocationTagFromAddress(bits(64) tagged_address)
    // The Logical Address Tag occupies bits [59:56] of a tagged virtual address.
    return tagged_address<59:56>;
// AArch64.CheckAlignment()
// ========================
// Returns TRUE if the access of 'alignment' bytes at 'address' is aligned,
// and FALSE otherwise. Generates an Alignment fault if an alignment check
// is required (strict alignment enabled, or an atomic/ordered access type,
// subject to the FEAT_LSE2 relaxations) and the access is unaligned.

boolean AArch64.CheckAlignment(bits(64) address, integer alignment, AccType acctype,
                               boolean iswrite)
    aligned = (address == Align(address, alignment));
    atomic  = acctype IN { AccType_ATOMIC, AccType_ATOMICRW, AccType_ORDEREDATOMIC,
                           AccType_ORDEREDATOMICRW, AccType_ATOMICLS64 };
    ordered = acctype IN { AccType_ORDERED, AccType_ORDEREDRW, AccType_LIMITEDORDERED,
                           AccType_ORDEREDATOMIC, AccType_ORDEREDATOMICRW };
    vector  = acctype == AccType_VEC;

    if SCTLR[].A == '1' then check = TRUE;
    elsif HaveLSE2Ext() then
        // FEAT_LSE2: atomic and ordered accesses that do not cross a 16-byte
        // boundary need not be strictly aligned; for ordered accesses the
        // relaxation is further gated by SCTLR[].nAA.
        check = (UInt(address<0+:4>) + alignment > 16) && ((ordered && SCTLR[].nAA == '0') || atomic);
    else check = atomic || ordered;

    if check && !aligned then
        secondstage = FALSE;
        AArch64.Abort(address, AArch64.AlignmentFault(acctype, iswrite, secondstage));

    return aligned;
// AArch64.CheckTag()
// ==================
// Performs a Tag Check operation for a memory access and returns
// whether the check passed

boolean AArch64.CheckTag(AddressDescriptor memaddrdesc, bits(4) ptag, boolean write)
    if memaddrdesc.memattrs.tagged then
        return ptag == _MemTag[memaddrdesc];
    else
        // Accesses to untagged memory trivially pass the check.
        return TRUE;
// AArch64.MemSingle[] - non-assignment (read) form
// ================================================
// Perform an atomic, little-endian read of 'size' bytes.

bits(size*8) AArch64.MemSingle[bits(64) address, integer size, AccType acctype, boolean wasaligned]
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    bits(size*8) value;
    iswrite = FALSE;

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, size);
    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    // Fixed: the source read "HaveMTE2ExtHaveMTEExt()", a garbled fusion of the
    // old (HaveMTEExt) and new (HaveMTE2Ext) feature checks from a diff.
    if HaveMTE2Ext() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFault(ZeroExtend(address, 64), acctype, iswrite);
    value = _Mem[memaddrdesc, size, accdesc, FALSE];

    return value;
// AArch64.MemSingle[] - assignment (write) form
// =============================================
// Perform an atomic, little-endian write of 'size' bytes.

AArch64.MemSingle[bits(64) address, integer size, AccType acctype, boolean wasaligned] = bits(size*8) value
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    iswrite = TRUE;

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, size);
    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    // Fixed: the source read "HaveMTE2ExtHaveMTEExt()", a garbled fusion of the
    // old (HaveMTEExt) and new (HaveMTE2Ext) feature checks from a diff.
    if HaveMTE2Ext() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFault(ZeroExtend(address, 64), acctype, iswrite);
    _Mem[memaddrdesc, size, accdesc] = value;

    return;
// AArch64.MemTag[] - non-assignment (read) form
// =============================================
// Load an Allocation Tag from memory.

bits(4) AArch64.MemTag[bits(64) address, AccType acctype]
    assert acctype == AccType_NORMAL;
    AddressDescriptor memaddrdesc;
    bits(4) value;
    iswrite = FALSE;

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, TRUE, TAG_GRANULE);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Return the granule tag if tagging is enabled...
    if AArch64.AllocationTagAccessIsEnabled(acctype) && memaddrdesc.memattrs.tagged then
        return _MemTag[memaddrdesc];
    else
        // ...otherwise read tag as zero.
        return '0000';

// AArch64.MemTag[] - assignment (write) form
// ==========================================
// Store an Allocation Tag to memory.

AArch64.MemTag[bits(64) address, AccType acctype] = bits(4) value
    assert acctype == AccType_NORMAL;
    AddressDescriptor memaddrdesc;
    iswrite = TRUE;

    // Stores of allocation tags must be aligned
    if address != Align(address, TAG_GRANULE) then
        boolean secondstage = FALSE;
        AArch64.Abort(address, AArch64.AlignmentFault(acctype, iswrite, secondstage));

    wasaligned = TRUE;
    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, TAG_GRANULE);

    // It is CONSTRAINED UNPREDICTABLE if tags stored to memory locations marked as Device
    // generate an Alignment Fault or store the data to locations.
    if memaddrdesc.memattrs.memtype == MemType_Device then
        c = ConstrainUnpredictable(Unpredictable_DEVICETAGSTORE);
        assert c IN {Constraint_NONE, Constraint_FAULT};
        if c == Constraint_FAULT then
            boolean secondstage = FALSE;
            AArch64.Abort(address, AArch64.AlignmentFault(acctype, iswrite, secondstage));

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Memory array access
    // The store is silently dropped when tagging is disabled or memory is untagged.
    if AArch64.AllocationTagAccessIsEnabled(acctype) && memaddrdesc.memattrs.tagged then
        _MemTag[memaddrdesc] = value;
// AArch64.PhysicalTag()
// =====================
// Generate a Physical Tag from a Logical Tag in an address

bits(4) AArch64.PhysicalTag(bits(64) vaddr)
    // The Logical Address Tag occupies bits [59:56] of the virtual address.
    return vaddr<59:56>;
// AArch64.TranslateAddressForAtomicAccess()
// =========================================
// Performs an alignment check for atomic memory operations.
// Also translates 64-bit Virtual Address into Physical Address.

AddressDescriptor AArch64.TranslateAddressForAtomicAccess(bits(64) address, integer sizeinbits)
    boolean iswrite = FALSE;
    size = sizeinbits DIV 8;

    assert size IN {1, 2, 4, 8, 16};

    aligned = AArch64.CheckAlignment(address, size, AccType_ATOMICRW, iswrite);

    // MMU or MPU lookup
    memaddrdesc = AArch64.TranslateAddress(address, AccType_ATOMICRW, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);

    // Fixed: the source read "HaveMTE2ExtHaveMTEExt()", a garbled fusion of the
    // old (HaveMTEExt) and new (HaveMTE2Ext) feature checks from a diff.
    if HaveMTE2Ext() && AArch64.AccessIsTagChecked(address, AccType_ATOMICRW) then
        bits(4) ptag = AArch64.PhysicalTag(address);
        if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
            AArch64.TagCheckFault(address, AccType_ATOMICRW, iswrite);

    return memaddrdesc;
// AddressSupportsLS64()
// =====================
// Returns TRUE if the 64-byte block following the given address supports the
// LD64B and ST64B instructions, and FALSE otherwise.
// IMPLEMENTATION DEFINED: prototype only; the body is provided elsewhere.
boolean AddressSupportsLS64(bits(64) address);
// CheckSPAlignment()
// ==================
// Check correct stack pointer alignment for AArch64 state.
// Generates an SP alignment fault if checking is enabled (SCTLR[].SA0 at EL0,
// SCTLR[].SA otherwise) and SP is not 16-byte aligned.

CheckSPAlignment()
    bits(64) sp = SP[];
    if PSTATE.EL == EL0 then
        stack_align_check = (SCTLR[].SA0 != '0');
    else
        stack_align_check = (SCTLR[].SA != '0');

    if stack_align_check && sp != Align(sp, 16) then
        AArch64.SPAlignmentFault();

    return;
// IsBlockDescriptorNTBitValid()
// =============================
// If the implementation supports changing the block size without a break-before-make
// approach, then for implementations that have level 1 or 2 support, the nT bit in
// the block descriptor is valid.
// IMPLEMENTATION DEFINED: prototype only; the body is provided elsewhere.
boolean IsBlockDescriptorNTBitValid();
// IsTagCheckedInstruction()
// =========================
// Returns True if the current instruction uses tag-checked memory access,
// False otherwise.
// Prototype only; the body is provided elsewhere.
boolean IsTagCheckedInstruction();
// Mem[] - non-assignment (read) form
// ==================================
// Perform a read of 'size' bytes. The access byte order is reversed for a big-endian access.
// Instruction fetches would call AArch64.MemSingle directly.

bits(size*8) Mem[bits(64) address, integer size, AccType acctype]
    assert size IN {1, 2, 4, 8, 16};
    bits(size*8) value;
    boolean iswrite = FALSE;

    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);
    if size != 16 || !(acctype IN {AccType_VEC, AccType_VECSTREAM}) then
        atomic = aligned;
    else
        // 128-bit SIMD&FP loads are treated as a pair of 64-bit single-copy atomic accesses
        // 64-bit aligned.
        atomic = address == Align(address, 8);

    if !atomic then
        assert size > 1;
        value<7:0> = AArch64.MemSingle[address, 1, acctype, aligned];

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        if !aligned then
            c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
            assert c IN {Constraint_FAULT, Constraint_NONE};
            if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            value<8*i+7:8*i> = AArch64.MemSingle[address+i, 1, acctype, aligned];
    elsif size == 16 && acctype IN {AccType_VEC, AccType_VECSTREAM} then
        value<63:0>   = AArch64.MemSingle[address,   8, acctype, aligned];
        value<127:64> = AArch64.MemSingle[address+8, 8, acctype, aligned];
    else
        value = AArch64.MemSingle[address, size, acctype, aligned];

    if BigEndian(acctype) then
        value = BigEndianReverse(value);
    return value;

// Mem[] - assignment (write) form
// ===============================
// Perform a write of 'size' bytes. The byte order is reversed for a big-endian access.

Mem[bits(64) address, integer size, AccType acctype] = bits(size*8) value
    boolean iswrite = TRUE;

    if BigEndian(acctype) then
        value = BigEndianReverse(value);

    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);
    if size != 16 || !(acctype IN {AccType_VEC, AccType_VECSTREAM}) then
        atomic = aligned;
    else
        // 128-bit SIMD&FP stores are treated as a pair of 64-bit single-copy atomic accesses
        // 64-bit aligned.
        atomic = address == Align(address, 8);

    if !atomic then
        assert size > 1;
        AArch64.MemSingle[address, 1, acctype, aligned] = value<7:0>;

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        if !aligned then
            c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
            assert c IN {Constraint_FAULT, Constraint_NONE};
            if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            AArch64.MemSingle[address+i, 1, acctype, aligned] = value<8*i+7:8*i>;
    elsif size == 16 && acctype IN {AccType_VEC, AccType_VECSTREAM} then
        AArch64.MemSingle[address,   8, acctype, aligned] = value<63:0>;
        AArch64.MemSingle[address+8, 8, acctype, aligned] = value<127:64>;
    else
        AArch64.MemSingle[address, size, acctype, aligned] = value;
    return;
// MemAtomic()
// ===========
// Performs load and store memory operations for a given virtual address.
// Returns the old (pre-operation) memory value; the stored value is the
// result of applying 'op' to the old value and 'value'.

bits(size) MemAtomic(bits(64) address, MemAtomicOp op, bits(size) value, AccType ldacctype, AccType stacctype)
    bits(size) newvalue;
    memaddrdesc = AArch64.TranslateAddressForAtomicAccess(address, size);
    ldaccdesc = CreateAccessDescriptor(ldacctype);
    staccdesc = CreateAccessDescriptor(stacctype);

    // All observers in the shareability domain observe the
    // following load and store atomically.
    oldvalue = _Mem[memaddrdesc, size DIV 8, ldaccdesc, FALSE];
    if BigEndian(ldacctype) then
        oldvalue = BigEndianReverse(oldvalue);

    case op of
        when MemAtomicOp_ADD  newvalue = oldvalue + value;
        when MemAtomicOp_BIC  newvalue = oldvalue AND NOT(value);
        when MemAtomicOp_EOR  newvalue = oldvalue EOR value;
        when MemAtomicOp_ORR  newvalue = oldvalue OR value;
        when MemAtomicOp_SMAX newvalue = if SInt(oldvalue) > SInt(value) then oldvalue else value;
        when MemAtomicOp_SMIN newvalue = if SInt(oldvalue) > SInt(value) then value else oldvalue;
        when MemAtomicOp_UMAX newvalue = if UInt(oldvalue) > UInt(value) then oldvalue else value;
        when MemAtomicOp_UMIN newvalue = if UInt(oldvalue) > UInt(value) then value else oldvalue;
        when MemAtomicOp_SWP  newvalue = value;

    if BigEndian(stacctype) then
        newvalue = BigEndianReverse(newvalue);
    _Mem[memaddrdesc, size DIV 8, staccdesc] = newvalue;

    // Load operations return the old (pre-operation) value
    return oldvalue;
// MemAtomicCompareAndSwap()
// =========================
// Compares the value stored at the passed-in memory address against the passed-in expected
// value. If the comparison is successful, the value at the passed-in memory address is swapped
// with the passed-in new_value. Returns the old (pre-operation) memory value.

bits(size) MemAtomicCompareAndSwap(bits(64) address, bits(size) expectedvalue,
                                   bits(size) newvalue, AccType ldacctype, AccType stacctype)
    memaddrdesc = AArch64.TranslateAddressForAtomicAccess(address, size);
    ldaccdesc = CreateAccessDescriptor(ldacctype);
    staccdesc = CreateAccessDescriptor(stacctype);

    // All observers in the shareability domain observe the
    // following load and store atomically.
    oldvalue = _Mem[memaddrdesc, size DIV 8, ldaccdesc, FALSE];
    if BigEndian(ldacctype) then
        oldvalue = BigEndianReverse(oldvalue);

    if oldvalue == expectedvalue then
        if BigEndian(stacctype) then
            newvalue = BigEndianReverse(newvalue);
        _Mem[memaddrdesc, size DIV 8, staccdesc] = newvalue;
    return oldvalue;
// MemLoad64B()
// ============
// Performs an atomic 64-byte read from a given virtual address.
// NOTE(review): this block was reconstructed from fused old/new diff text;
// the retained lines are the newer versions (CheckAlignment-derived 'aligned',
// 'size' constant, HaveMTE2Ext, 4-argument ExclusiveFault).

bits(512) MemLoad64B(bits(64) address, AccType acctype)
    bits(512) data;
    boolean iswrite = FALSE;
    constant integer size = 64;

    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);
    if !AddressSupportsLS64(address) then
        c = ConstrainUnpredictable(Unpredictable_LS64UNSUPPORTED);
        assert c IN {Constraint_LIMITED_ATOMICITY, Constraint_FAULT};
        if c == Constraint_FAULT then
            // Generate a stage 1 Data Abort reported using the DFSC code of 110101.
            boolean secondstage = FALSE;
            boolean s2fs1walk = FALSE;
            fault = AArch64.ExclusiveFault(acctype, iswrite, secondstage, s2fs1walk);
            AArch64.Abort(address, fault);
        else
            // Accesses are not single-copy atomic above the byte level.
            // Fixed: byte i of the 64-byte block is at address+i; the source's
            // "address+8*i" would stride far past the block for 1-byte reads.
            for i = 0 to 63
                data<7+8*i : 8*i> = AArch64.MemSingle[address+i, 1, acctype, aligned];
            return data;

    AddressDescriptor memaddrdesc;
    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    if HaveMTE2Ext() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFault(address, acctype, iswrite);
    data = _Mem[memaddrdesc, size, accdesc, iswrite];

    return data;
// MemStore64B()
// =============
// Performs an atomic 64-byte store to a given virtual address. Function does
// not return the status of the store.
// NOTE(review): this block was reconstructed from fused old/new diff text;
// the retained lines are the newer versions (CheckAlignment-derived 'aligned',
// 'size' constant, 4-argument ExclusiveFault).

MemStore64B(bits(64) address, bits(512) value, AccType acctype)
    boolean iswrite = TRUE;
    constant integer size = 64;

    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);
    if !AddressSupportsLS64(address) then
        c = ConstrainUnpredictable(Unpredictable_LS64UNSUPPORTED);
        assert c IN {Constraint_LIMITED_ATOMICITY, Constraint_FAULT};
        if c == Constraint_FAULT then
            // Generate a Data Abort reported using the DFSC code of 110101.
            boolean secondstage = FALSE;
            boolean s2fs1walk = FALSE;
            fault = AArch64.ExclusiveFault(acctype, iswrite, secondstage, s2fs1walk);
            AArch64.Abort(address, fault);
        else
            // Accesses are not single-copy atomic above the byte level.
            // Fixed: byte i of the 64-byte block is at address+i; the source's
            // "address+8*i" would stride far past the block for 1-byte writes.
            for i = 0 to 63
                AArch64.MemSingle[address+i, 1, acctype, aligned] = value<7+8*i : 8*i>;
    else
        - = MemStore64BWithRet(address, value, acctype);    // Return status is ignored by ST64B
    return;
// MemStore64BWithRet()
// ====================
// Performs an atomic 64-byte store to a given virtual address returning
// the status value of the operation.
// NOTE(review): this block was reconstructed from fused old/new diff text;
// the retained lines are the newer versions (CheckAlignment-derived 'aligned',
// 'size' constant, HaveMTE2Ext, explicit fail-status returns).

bits(64) MemStore64BWithRet(bits(64) address, bits(512) value, AccType acctype)
    AddressDescriptor memaddrdesc;
    boolean iswrite = TRUE;
    constant integer size = 64;

    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);
    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);
        return ZeroExtend('1');    // Fail status

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    if HaveMTE2Ext() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFault(address, acctype, iswrite);
                return ZeroExtend('1');    // Fail status

    _Mem[memaddrdesc, size, accdesc] = value;
    status = MemStore64BWithRetStatus();
    return status;
// MemStore64BWithRetStatus()
// ==========================
// Generates the return status of memory write with ST64BV or ST64BV0
// instructions. The status indicates if the operation succeeded, failed,
// or was not supported at this memory location.
// IMPLEMENTATION DEFINED: prototype only; the body is provided elsewhere.
bits(64) MemStore64BWithRetStatus();
// NVMem[] - non-assignment form
// =============================
// This function is the load memory access for the transformed System register read access
// when Enhanced Nested Virtualisation is enabled with HCR_EL2.NV2 = 1.
// The address for the load memory access is calculated using
// the formula SignExtend(VNCR_EL2.BADDR : Offset<11:0>, 64) where,
// * VNCR_EL2.BADDR holds the base address of the memory location, and
// * Offset is the unique offset value defined architecturally for each System register that
//   supports transformation of register access to memory access.

bits(64) NVMem[integer offset]
    assert offset > 0;
    bits(64) address = SignExtend(VNCR_EL2.BADDR:offset<11:0>, 64);
    return Mem[address, 8, AccType_NV2REGISTER];

// NVMem[] - assignment form
// =========================
// This function is the store memory access for the transformed System register write access
// when Enhanced Nested Virtualisation is enabled with HCR_EL2.NV2 = 1.
// The address for the store memory access is calculated using
// the formula SignExtend(VNCR_EL2.BADDR : Offset<11:0>, 64) where,
// * VNCR_EL2.BADDR holds the base address of the memory location, and
// * Offset is the unique offset value defined architecturally for each System register that
//   supports transformation of register access to memory access.

NVMem[integer offset] = bits(64) value
    assert offset > 0;
    bits(64) address = SignExtend(VNCR_EL2.BADDR:offset<11:0>, 64);
    Mem[address, 8, AccType_NV2REGISTER] = value;
    return;
// SetTagCheckedInstruction()
// ==========================
// Flag the current instruction as using/not using memory tag checking.
// Prototype only; the body is provided elsewhere.
SetTagCheckedInstruction(boolean checked);
// _MemTag[] - non-assignment (read) form
// ======================================
// This _MemTag[] accessor is the hardware operation which perform a single-copy atomic,
// Allocation Tag granule aligned, memory access from the tag in PA space.
//
// The function address the array using desc.paddress which supplies:
// * A 52-bit physical address
// * A single NS bit to select between Secure and Non-secure parts of the array.
//
// The accdesc descriptor describes the access type: normal, exclusive, ordered, streaming,
// etc and other parameters required to access the physical memory or for setting syndrome
// register in the event of an external abort.
bits(4) _MemTag[AddressDescriptor desc, AccessDescriptor accdesc];

// _MemTag[] - assignment (write) form
// ===================================
// This _MemTag[] accessor is the hardware operation which perform a single-copy atomic,
// Allocation Tag granule aligned, memory access to the tag in PA space.
//
// The functions address the array using desc.paddress which supplies:
// * A 52-bit physical address
// * A single NS bit to select between Secure and Non-secure parts of the array.
//
// The accdesc descriptor describes the access type: normal, exclusive, ordered, streaming,
// etc and other parameters required to access the physical memory or for setting syndrome
// register in the event of an external abort.
_MemTag[AddressDescriptor desc, AccessDescriptor accdesc] = bits(4) value;
// AddPAC()
// ========
// Calculates the pointer authentication code for a 64-bit quantity and then
// inserts that into pointer authentication code field of that 64-bit quantity.

bits(64) AddPAC(bits(64) ptr, bits(64) modifier, bits(128) K, boolean data)
    bits(64) PAC;
    bits(64) result;
    bits(64) ext_ptr;
    bits(64) extfield;
    bit selbit;
    boolean tbi = EffectiveTBI(ptr, !data, PSTATE.EL) == '1';
    integer top_bit = if tbi then 55 else 63;

    // If tagged pointers are in use for a regime with two TTBRs, use bit<55> of
    // the pointer to select between upper and lower ranges, and preserve this.
    // This handles the awkward case where there is apparently no correct choice between
    // the upper and lower address range - ie an addr of 1xxxxxxx0... with TBI0=0 and TBI1=1
    // and 0xxxxxxx1 with TBI1=0 and TBI0=1:
    if PtrHasUpperAndLowerAddRanges() then
        assert S1TranslationRegime() IN {EL1, EL2};
        if S1TranslationRegime() == EL1 then
            // EL1 translation regime registers
            if data then
                if TCR_EL1.TBI1 == '1' || TCR_EL1.TBI0 == '1' then
                    selbit = ptr<55>;
                else
                    selbit = ptr<63>;
            else
                if ((TCR_EL1.TBI1 == '1' && TCR_EL1.TBID1 == '0') ||
                    (TCR_EL1.TBI0 == '1' && TCR_EL1.TBID0 == '0')) then
                    selbit = ptr<55>;
                else
                    selbit = ptr<63>;
        else
            // EL2 translation regime registers
            if data then
                if TCR_EL2.TBI1 == '1' || TCR_EL2.TBI0 == '1' then
                    selbit = ptr<55>;
                else
                    selbit = ptr<63>;
            else
                if ((TCR_EL2.TBI1 == '1' && TCR_EL2.TBID1 == '0') ||
                    (TCR_EL2.TBI0 == '1' && TCR_EL2.TBID0 == '0')) then
                    selbit = ptr<55>;
                else
                    selbit = ptr<63>;
    else selbit = if tbi then ptr<55> else ptr<63>;

    integer bottom_PAC_bit = CalculateBottomPACBit(selbit);

    // The pointer authentication code field takes all the available bits in between
    extfield = Replicate(selbit, 64);

    // Compute the pointer authentication code for a ptr with good extension bits
    if tbi then
        ext_ptr = ptr<63:56>:extfield<(56-bottom_PAC_bit)-1:0>:ptr<bottom_PAC_bit-1:0>;
    else
        ext_ptr = extfield<(64-bottom_PAC_bit)-1:0>:ptr<bottom_PAC_bit-1:0>;

    PAC = ComputePAC(ext_ptr, modifier, K<127:64>, K<63:0>);

    // Check if the ptr has good extension bits and corrupt the pointer authentication code if not
    if !IsZero(ptr<top_bit:bottom_PAC_bit>) && !IsOnes(ptr<top_bit:bottom_PAC_bit>) then
        if HaveEnhancedPAC() then
            PAC = 0x0000000000000000<63:0>;
        elsif !HaveEnhancedPAC2() then
            PAC<top_bit-1> = NOT(PAC<top_bit-1>);

    // preserve the determination between upper and lower address at bit<55> and insert PAC
    if !HaveEnhancedPAC2() then
        if tbi then
            result = ptr<63:56>:selbit:PAC<54:bottom_PAC_bit>:ptr<bottom_PAC_bit-1:0>;
        else
            result = PAC<63:56>:selbit:PAC<54:bottom_PAC_bit>:ptr<bottom_PAC_bit-1:0>;
    else
        // FEAT_PAuth2: the PAC is EORed into the pointer rather than replacing the field.
        if tbi then
            result = ptr<63:56>:selbit:(ptr<54:bottom_PAC_bit> EOR PAC<54:bottom_PAC_bit>):ptr<bottom_PAC_bit-1:0>;
        else
            result = (ptr<63:56> EOR PAC<63:56>):selbit:(ptr<54:bottom_PAC_bit> EOR PAC<54:bottom_PAC_bit>):ptr<bottom_PAC_bit-1:0>;
    return result;
// AddPACDA()
// ==========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with a pointer authentication code, where the pointer authentication
// code is derived using a cryptographic algorithm as a combination of X, Y and the
// APDAKey_EL1.

bits(64) AddPACDA(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APDAKey_EL1;

    APDAKey_EL1 = APDAKeyHi_EL1<63:0> : APDAKeyLo_EL1<63:0>;

    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnDA else SCTLR_EL2.EnDA;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnDA;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnDA;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnDA;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    // When disabled, the pointer is returned unmodified (no trap).
    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return AddPAC(X, Y, APDAKey_EL1, TRUE);
// AddPACDB()
// ==========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with a pointer authentication code, where the pointer authentication
// code is derived using a cryptographic algorithm as a combination of X, Y and the
// APDBKey_EL1.

bits(64) AddPACDB(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APDBKey_EL1;

    APDBKey_EL1 = APDBKeyHi_EL1<63:0> : APDBKeyLo_EL1<63:0>;

    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnDB else SCTLR_EL2.EnDB;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnDB;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnDB;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnDB;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    // When disabled, the pointer is returned unmodified (no trap).
    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return AddPAC(X, Y, APDBKey_EL1, TRUE);
// AddPACGA()
// ==========
// Returns a 64-bit value where the lower 32 bits are 0, and the upper 32 bits contain
// a 32-bit pointer authentication code which is derived using a cryptographic
// algorithm as a combination of X, Y and the APGAKey_EL1.

bits(64) AddPACGA(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(128) APGAKey_EL1;

    APGAKey_EL1 = APGAKeyHi_EL1<63:0> : APGAKeyLo_EL1<63:0>;

    case PSTATE.EL of
        when EL0
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else
        // PACGA has no per-key enable bit; only the trap controls apply.
        return ComputePAC(X, Y, APGAKey_EL1<127:64>, APGAKey_EL1<63:0>)<63:32>:Zeros(32);
// AddPACIA()
// ==========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with a pointer authentication code, where the pointer authentication
// code is derived using a cryptographic algorithm as a combination of X, Y, and the
// APIAKey_EL1.

bits(64) AddPACIA(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APIAKey_EL1;

    APIAKey_EL1 = APIAKeyHi_EL1<63:0>:APIAKeyLo_EL1<63:0>;

    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnIA else SCTLR_EL2.EnIA;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnIA;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnIA;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnIA;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    // When disabled, the pointer is returned unmodified (no trap).
    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return AddPAC(X, Y, APIAKey_EL1, FALSE);
// AddPACIB()
// ==========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with a pointer authentication code, where the pointer authentication
// code is derived using a cryptographic algorithm as a combination of X, Y and the
// APIBKey_EL1.

bits(64) AddPACIB(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APIBKey_EL1;

    APIBKey_EL1 = APIBKeyHi_EL1<63:0> : APIBKeyLo_EL1<63:0>;

    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnIB else SCTLR_EL2.EnIB;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnIB;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnIB;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnIB;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    // When disabled, the pointer is returned unmodified (no trap).
    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return AddPAC(X, Y, APIBKey_EL1, FALSE);
// AArch64.PACFailException()
// ==========================
// Generates a PAC Fail Exception

AArch64.PACFailException(bits(2) syndrome)
    route_to_el2 = PSTATE.EL == EL0 && EL2Enabled() && HCR_EL2.TGE == '1';

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_PACFail);
    exception.syndrome<1:0>  = syndrome;
    exception.syndrome<24:2> = Zeros();             // RES0

    if UInt(PSTATE.EL) > UInt(EL0) then
        // Taken to the current Exception level when executing above EL0.
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// Auth()
// ======
// Restores the upper bits of the address to be all zeros or all ones (based on the
// value of bit[55]) and computes and checks the pointer authentication code. If the
// check passes, then the restored address is returned. If the check fails, the
// second-top and third-top bits of the extension bits in the pointer authentication code
// field are corrupted to ensure that accessing the address will give a translation fault.

bits(64) Auth(bits(64) ptr, bits(64) modifier, bits(128) K, boolean data, bit key_number,
              boolean is_combined)
    bits(64) PAC;
    bits(64) result;
    bits(64) original_ptr;
    bits(2) error_code;
    bits(64) extfield;

    // Reconstruct the extension field used of adding the PAC to the pointer
    boolean tbi = EffectiveTBI(ptr, !data, PSTATE.EL) == '1';
    integer bottom_PAC_bit = CalculateBottomPACBit(ptr<55>);
    extfield = Replicate(ptr<55>, 64);

    // With TBI the top byte is passed through unchanged; the PAC field is narrower.
    if tbi then
        original_ptr = ptr<63:56>:extfield<56-bottom_PAC_bit-1:0>:ptr<bottom_PAC_bit-1:0>;
    else
        original_ptr = extfield<64-bottom_PAC_bit-1:0>:ptr<bottom_PAC_bit-1:0>;

    PAC = ComputePAC(original_ptr, modifier, K<127:64>, K<63:0>);
    // Check pointer authentication code
    if tbi then
        if !HaveEnhancedPAC2() then
            // Legacy behavior: on mismatch, corrupt bits [54:53] so a later
            // dereference produces a Translation fault.
            if PAC<54:bottom_PAC_bit> == ptr<54:bottom_PAC_bit> then
                result = original_ptr;
            else
                error_code = key_number:NOT(key_number);
                result = original_ptr<63:55>:error_code:original_ptr<52:0>;
        else
            // EnhancedPAC2 (FEAT_PAuth2): XOR the computed PAC into the pointer;
            // a correct PAC cancels exactly, restoring the extension bits.
            result = ptr;
            result<54:bottom_PAC_bit> = result<54:bottom_PAC_bit> EOR PAC<54:bottom_PAC_bit>;
            // FEAT_FPAC: a failed authentication faults immediately.
            if HaveFPACCombined() || (HaveFPAC() && !is_combined) then
                if result<54:bottom_PAC_bit> != Replicate(result<55>, (55-bottom_PAC_bit)) then
                    error_code = (if data then '1' else '0'):key_number;
                    AArch64.PACFailException(error_code);
    else
        if !HaveEnhancedPAC2() then
            // Without TBI the PAC also occupies bits [63:56], so both fields are checked.
            if PAC<54:bottom_PAC_bit> == ptr<54:bottom_PAC_bit> && PAC<63:56> == ptr<63:56> then
                result = original_ptr;
            else
                error_code = key_number:NOT(key_number);
                result = original_ptr<63>:error_code:original_ptr<60:0>;
        else
            result = ptr;
            result<54:bottom_PAC_bit> = result<54:bottom_PAC_bit> EOR PAC<54:bottom_PAC_bit>;
            result<63:56> = result<63:56> EOR PAC<63:56>;
            if HaveFPACCombined() || (HaveFPAC() && !is_combined) then
                if result<63:bottom_PAC_bit> != Replicate(result<55>, (64-bottom_PAC_bit)) then
                    error_code = (if data then '1' else '0'):key_number;
                    AArch64.PACFailException(error_code);
    return result;
// AuthDA()
// ========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with the extension of the address bits. The instruction checks a pointer
// authentication code in the pointer authentication code field bits of X, using the same
// algorithm and key as AddPACDA().

bits(64) AuthDA(bits(64) X, bits(64) Y, boolean is_combined)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APDAKey_EL1;

    // The 128-bit data key A is held in two 64-bit System registers.
    APDAKey_EL1 = APDAKeyHi_EL1<63:0> : APDAKeyLo_EL1<63:0>;

    // Determine, for the current Exception level, the enable bit for PAC
    // checking and whether use of the PAC instructions traps to EL2 or EL3.
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnDA else SCTLR_EL2.EnDA;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnDA;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnDA;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnDA;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    // When the enable bit is clear the instruction is a NOP returning X unchanged.
    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return Auth(X, Y, APDAKey_EL1, TRUE, '0', is_combined);
// AuthDB()
// ========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with the extension of the address bits. The instruction checks a
// pointer authentication code in the pointer authentication code field bits of X, using
// the same algorithm and key as AddPACDB().

bits(64) AuthDB(bits(64) X, bits(64) Y, boolean is_combined)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APDBKey_EL1;

    // The 128-bit data key B is held in two 64-bit System registers.
    APDBKey_EL1 = APDBKeyHi_EL1<63:0> : APDBKeyLo_EL1<63:0>;

    // Determine, for the current Exception level, the enable bit for PAC
    // checking and whether use of the PAC instructions traps to EL2 or EL3.
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnDB else SCTLR_EL2.EnDB;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnDB;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnDB;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnDB;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    // When the enable bit is clear the instruction is a NOP returning X unchanged.
    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return Auth(X, Y, APDBKey_EL1, TRUE, '1', is_combined);
// AuthIA()
// ========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with the extension of the address bits. The instruction checks a pointer
// authentication code in the pointer authentication code field bits of X, using the same
// algorithm and key as AddPACIA().

bits(64) AuthIA(bits(64) X, bits(64) Y, boolean is_combined)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APIAKey_EL1;

    // The 128-bit instruction key A is held in two 64-bit System registers.
    APIAKey_EL1 = APIAKeyHi_EL1<63:0> : APIAKeyLo_EL1<63:0>;

    // Determine, for the current Exception level, the enable bit for PAC
    // checking and whether use of the PAC instructions traps to EL2 or EL3.
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnIA else SCTLR_EL2.EnIA;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnIA;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnIA;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnIA;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    // When the enable bit is clear the instruction is a NOP returning X unchanged.
    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return Auth(X, Y, APIAKey_EL1, FALSE, '0', is_combined);
// AuthIB()
// ========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with the extension of the address bits. The instruction checks a pointer
// authentication code in the pointer authentication code field bits of X, using the same
// algorithm and key as AddPACIB().

bits(64) AuthIB(bits(64) X, bits(64) Y, boolean is_combined)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APIBKey_EL1;

    // The 128-bit instruction key B is held in two 64-bit System registers.
    APIBKey_EL1 = APIBKeyHi_EL1<63:0> : APIBKeyLo_EL1<63:0>;

    // Determine, for the current Exception level, the enable bit for PAC
    // checking and whether use of the PAC instructions traps to EL2 or EL3.
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnIB else SCTLR_EL2.EnIB;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnIB;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnIB;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnIB;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    // When the enable bit is clear the instruction is a NOP returning X unchanged.
    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return Auth(X, Y, APIBKey_EL1, FALSE, '1', is_combined);
// CalculateBottomPACBit()
// =======================
// Returns the index of the lowest bit of the PAC field, i.e. 64 minus the
// effective TnSZ for the address range selected by top_bit (bit[55] of the pointer).

integer CalculateBottomPACBit(bit top_bit)
    integer tsz_field;
    if PtrHasUpperAndLowerAddRanges() then
        assert S1TranslationRegime() IN {EL1, EL2};
        if S1TranslationRegime() == EL1 then
            // EL1 translation regime registers
            tsz_field = if top_bit == '1' then UInt(TCR_EL1.T1SZ) else UInt(TCR_EL1.T0SZ);
            using64k = if top_bit == '1' then TCR_EL1.TG1 == '11' else TCR_EL1.TG0 == '01';
        else
            // EL2 translation regime registers
            assert HaveEL(EL2);
            tsz_field = if top_bit == '1' then UInt(TCR_EL2.T1SZ) else UInt(TCR_EL2.T0SZ);
            using64k = if top_bit == '1' then TCR_EL2.TG1 == '11' else TCR_EL2.TG0 == '01';
    else
        // Single-range regimes (EL2 without E2H, EL3) only have T0SZ/TG0.
        tsz_field = if PSTATE.EL == EL2 then UInt(TCR_EL2.T0SZ) else UInt(TCR_EL3.T0SZ);
        using64k = if PSTATE.EL == EL2 then TCR_EL2.TG0 == '01' else TCR_EL3.TG0 == '01';

    max_limit_tsz_field = (if !HaveSmallTranslationTableExt() then 39
                           else if using64k then 47 else 48);
    if tsz_field > max_limit_tsz_field then
        // TCR_ELx.TySZ is out of range
        c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
        assert c IN {Constraint_FORCE, Constraint_NONE};
        if c == Constraint_FORCE then tsz_field = max_limit_tsz_field;

    // 52-bit VA (with 64KB granule) permits TnSZ as low as 12; otherwise 16.
    tszmin = if using64k && VAMax() == 52 then 12 else 16;
    if tsz_field < tszmin then
        c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
        assert c IN {Constraint_FORCE, Constraint_NONE};
        if c == Constraint_FORCE then tsz_field = tszmin;

    return (64-tsz_field);
// Round constants for the PAC computation (shared with ComputePAC below).
array bits(64) RC[0..4];

// ComputePAC()
// ============
// Computes the raw pointer authentication code for (data, modifier) under the
// 128-bit key (key0:key1) using a QARMA-style tweakable block cipher: five
// forward rounds, a middle reflector, and five inverse rounds, with the
// modifier acting as the tweak.

bits(64) ComputePAC(bits(64) data, bits(64) modifier, bits(64) key0, bits(64) key1)
    bits(64) workingval;
    bits(64) runningmod;
    bits(64) roundkey;
    bits(64) modk0;
    constant bits(64) Alpha = 0xC0AC29B7C97C50DD<63:0>;

    RC[0] = 0x0000000000000000<63:0>;
    RC[1] = 0x13198A2E03707344<63:0>;
    RC[2] = 0xA4093822299F31D0<63:0>;
    RC[3] = 0x082EFA98EC4E6C89<63:0>;
    RC[4] = 0x452821E638D01377<63:0>;

    // Orthomorphism of key0 used in the reflector and final whitening.
    modk0 = key0<0>:key0<63:2>:(key0<63> EOR key0<1>);
    runningmod = modifier;
    workingval = data EOR key0;

    // Forward rounds.
    for i = 0 to 4
        roundkey = key1 EOR runningmod;
        workingval = workingval EOR roundkey;
        workingval = workingval EOR RC[i];
        if i > 0 then
            workingval = PACCellShuffle(workingval);
            workingval = PACMult(workingval);
        workingval = PACSub(workingval);
        runningmod = TweakShuffle(runningmod<63:0>);

    // Middle reflector.
    roundkey = modk0 EOR runningmod;
    workingval = workingval EOR roundkey;
    workingval = PACCellShuffle(workingval);
    workingval = PACMult(workingval);
    workingval = PACSub(workingval);
    workingval = PACCellShuffle(workingval);
    workingval = PACMult(workingval);
    workingval = key1 EOR workingval;
    workingval = PACCellInvShuffle(workingval);
    workingval = PACInvSub(workingval);
    workingval = PACMult(workingval);
    workingval = PACCellInvShuffle(workingval);
    workingval = workingval EOR key0;
    workingval = workingval EOR runningmod;

    // Inverse rounds, consuming the round constants in reverse order.
    for i = 0 to 4
        workingval = PACInvSub(workingval);
        if i < 4 then
            workingval = PACMult(workingval);
            workingval = PACCellInvShuffle(workingval);
        runningmod = TweakInvShuffle(runningmod<63:0>);
        roundkey = key1 EOR runningmod;
        workingval = workingval EOR RC[4-i];
        workingval = workingval EOR roundkey;
        workingval = workingval EOR Alpha;

    workingval = workingval EOR modk0;

    return workingval;
// PACCellInvShuffle()
// ===================
// Inverse of PACCellShuffle: permutes the sixteen 4-bit cells of the input.

bits(64) PACCellInvShuffle(bits(64) indata)
    bits(64) outdata;
    outdata<3:0> = indata<15:12>;
    outdata<7:4> = indata<27:24>;
    outdata<11:8> = indata<51:48>;
    outdata<15:12> = indata<39:36>;
    outdata<19:16> = indata<59:56>;
    outdata<23:20> = indata<47:44>;
    outdata<27:24> = indata<7:4>;
    outdata<31:28> = indata<19:16>;
    outdata<35:32> = indata<35:32>;
    outdata<39:36> = indata<55:52>;
    outdata<43:40> = indata<31:28>;
    outdata<47:44> = indata<11:8>;
    outdata<51:48> = indata<23:20>;
    outdata<55:52> = indata<3:0>;
    outdata<59:56> = indata<43:40>;
    outdata<63:60> = indata<63:60>;
    return outdata;
// PACCellShuffle()
// ================
// Permutes the sixteen 4-bit cells of the input; inverted by PACCellInvShuffle.

bits(64) PACCellShuffle(bits(64) indata)
    bits(64) outdata;
    outdata<3:0> = indata<55:52>;
    outdata<7:4> = indata<27:24>;
    outdata<11:8> = indata<47:44>;
    outdata<15:12> = indata<3:0>;
    outdata<19:16> = indata<31:28>;
    outdata<23:20> = indata<51:48>;
    outdata<27:24> = indata<7:4>;
    outdata<31:28> = indata<43:40>;
    outdata<35:32> = indata<35:32>;
    outdata<39:36> = indata<15:12>;
    outdata<43:40> = indata<59:56>;
    outdata<47:44> = indata<23:20>;
    outdata<51:48> = indata<11:8>;
    outdata<55:52> = indata<39:36>;
    outdata<59:56> = indata<19:16>;
    outdata<63:60> = indata<63:60>;
    return outdata;
// PACInvSub()
// ===========
// Applies the inverse 4-bit S-box to each of the sixteen cells of the input.

bits(64) PACInvSub(bits(64) Tinput)
    // This is a 4-bit substitution from the PRINCE-family cipher
    bits(64) Toutput;
    for i = 0 to 15
        case Tinput<4*i+3:4*i> of
            when '0000' Toutput<4*i+3:4*i> = '0101';
            when '0001' Toutput<4*i+3:4*i> = '1110';
            when '0010' Toutput<4*i+3:4*i> = '1101';
            when '0011' Toutput<4*i+3:4*i> = '1000';
            when '0100' Toutput<4*i+3:4*i> = '1010';
            when '0101' Toutput<4*i+3:4*i> = '1011';
            when '0110' Toutput<4*i+3:4*i> = '0001';
            when '0111' Toutput<4*i+3:4*i> = '1001';
            when '1000' Toutput<4*i+3:4*i> = '0010';
            when '1001' Toutput<4*i+3:4*i> = '0110';
            when '1010' Toutput<4*i+3:4*i> = '1111';
            when '1011' Toutput<4*i+3:4*i> = '0000';
            when '1100' Toutput<4*i+3:4*i> = '0100';
            when '1101' Toutput<4*i+3:4*i> = '1100';
            when '1110' Toutput<4*i+3:4*i> = '0111';
            when '1111' Toutput<4*i+3:4*i> = '0011';
    return Toutput;
// PACMult()
// =========
// MixColumns-style diffusion layer: each column of four 4-bit cells is mixed
// using rotations of the other three cells in the same column.

bits(64) PACMult(bits(64) Sinput)
    bits(4) t0;
    bits(4) t1;
    bits(4) t2;
    bits(4) t3;
    bits(64) Soutput;

    for i = 0 to 3
        t0<3:0> = RotCell(Sinput<4*(i+8)+3:4*(i+8)>, 1) EOR RotCell(Sinput<4*(i+4)+3:4*(i+4)>, 2);
        t0<3:0> = t0<3:0> EOR RotCell(Sinput<4*(i)+3:4*(i)>, 1);
        t1<3:0> = RotCell(Sinput<4*(i+12)+3:4*(i+12)>, 1) EOR RotCell(Sinput<4*(i+4)+3:4*(i+4)>, 1);
        t1<3:0> = t1<3:0> EOR RotCell(Sinput<4*(i)+3:4*(i)>, 2);
        t2<3:0> = RotCell(Sinput<4*(i+12)+3:4*(i+12)>, 2) EOR RotCell(Sinput<4*(i+8)+3:4*(i+8)>, 1);
        t2<3:0> = t2<3:0> EOR RotCell(Sinput<4*(i)+3:4*(i)>, 1);
        t3<3:0> = RotCell(Sinput<4*(i+12)+3:4*(i+12)>, 1) EOR RotCell(Sinput<4*(i+8)+3:4*(i+8)>, 2);
        t3<3:0> = t3<3:0> EOR RotCell(Sinput<4*(i+4)+3:4*(i+4)>, 1);
        Soutput<4*i+3:4*i> = t3<3:0>;
        Soutput<4*(i+4)+3:4*(i+4)> = t2<3:0>;
        Soutput<4*(i+8)+3:4*(i+8)> = t1<3:0>;
        Soutput<4*(i+12)+3:4*(i+12)> = t0<3:0>;
    return Soutput;
// PACSub()
// ========
// Applies the forward 4-bit S-box to each of the sixteen cells of the input;
// inverted by PACInvSub.

bits(64) PACSub(bits(64) Tinput)
    // This is a 4-bit substitution from the PRINCE-family cipher
    bits(64) Toutput;
    for i = 0 to 15
        case Tinput<4*i+3:4*i> of
            when '0000' Toutput<4*i+3:4*i> = '1011';
            when '0001' Toutput<4*i+3:4*i> = '0110';
            when '0010' Toutput<4*i+3:4*i> = '1000';
            when '0011' Toutput<4*i+3:4*i> = '1111';
            when '0100' Toutput<4*i+3:4*i> = '1100';
            when '0101' Toutput<4*i+3:4*i> = '0000';
            when '0110' Toutput<4*i+3:4*i> = '1001';
            when '0111' Toutput<4*i+3:4*i> = '1110';
            when '1000' Toutput<4*i+3:4*i> = '0011';
            when '1001' Toutput<4*i+3:4*i> = '0111';
            when '1010' Toutput<4*i+3:4*i> = '0100';
            when '1011' Toutput<4*i+3:4*i> = '0101';
            when '1100' Toutput<4*i+3:4*i> = '1101';
            when '1101' Toutput<4*i+3:4*i> = '0010';
            when '1110' Toutput<4*i+3:4*i> = '0001';
            when '1111' Toutput<4*i+3:4*i> = '1010';
    return Toutput;
// RotCell()
// =========
// Rotates a 4-bit cell left by 'amount' bit positions, implemented by taking
// a 4-bit window out of the cell concatenated with itself.
// NOTE(review): the original commented-out check below expresses the *invalid*
// range (amount > 3 or amount < 1); callers in PACMult only pass 1 or 2.

bits(4) RotCell(bits(4) incell, integer amount)
    bits(8) tmp;
    bits(4) outcell;
    // assert amount>3 || amount<1;
    tmp<7:0> = incell<3:0>:incell<3:0>;
    outcell = tmp<7-amount:4-amount>;
    return outcell;
// TweakCellInvRot()
// =================
// Inverse of TweakCellRot: one step of the LFSR applied to a 4-bit tweak cell.

bits(4) TweakCellInvRot(bits(4)incell)
    bits(4) outcell;
    outcell<3> = incell<2>;
    outcell<2> = incell<1>;
    outcell<1> = incell<0>;
    outcell<0> = incell<0> EOR incell<3>;
    return outcell;
// TweakCellRot()
// ==============
// One step of the LFSR applied to a 4-bit tweak cell; inverted by TweakCellInvRot.

bits(4) TweakCellRot(bits(4) incell)
    bits(4) outcell;
    outcell<3> = incell<0> EOR incell<1>;
    outcell<2> = incell<3>;
    outcell<1> = incell<2>;
    outcell<0> = incell<1>;
    return outcell;
// TweakInvShuffle()
// =================
// Inverse tweak schedule step: permutes the sixteen 4-bit cells, applying
// TweakCellInvRot to the cells that TweakShuffle rotated.

bits(64) TweakInvShuffle(bits(64)indata)
    bits(64) outdata;
    outdata<3:0> = TweakCellInvRot(indata<51:48>);
    outdata<7:4> = indata<55:52>;
    outdata<11:8> = indata<23:20>;
    outdata<15:12> = indata<27:24>;
    outdata<19:16> = indata<3:0>;
    outdata<23:20> = indata<7:4>;
    outdata<27:24> = TweakCellInvRot(indata<11:8>);
    outdata<31:28> = indata<15:12>;
    outdata<35:32> = TweakCellInvRot(indata<31:28>);
    outdata<39:36> = TweakCellInvRot(indata<63:60>);
    outdata<43:40> = TweakCellInvRot(indata<59:56>);
    outdata<47:44> = TweakCellInvRot(indata<19:16>);
    outdata<51:48> = indata<35:32>;
    outdata<55:52> = indata<39:36>;
    outdata<59:56> = indata<43:40>;
    outdata<63:60> = TweakCellInvRot(indata<47:44>);
    return outdata;
// TweakShuffle()
// ==============
// Forward tweak schedule step: permutes the sixteen 4-bit cells, applying
// TweakCellRot to a subset of them; inverted by TweakInvShuffle.

bits(64) TweakShuffle(bits(64) indata)
    bits(64) outdata;
    outdata<3:0> = indata<19:16>;
    outdata<7:4> = indata<23:20>;
    outdata<11:8> = TweakCellRot(indata<27:24>);
    outdata<15:12> = indata<31:28>;
    outdata<19:16> = TweakCellRot(indata<47:44>);
    outdata<23:20> = indata<11:8>;
    outdata<27:24> = indata<15:12>;
    outdata<31:28> = TweakCellRot(indata<35:32>);
    outdata<35:32> = indata<51:48>;
    outdata<39:36> = indata<55:52>;
    outdata<43:40> = indata<59:56>;
    outdata<47:44> = TweakCellRot(indata<63:60>);
    outdata<51:48> = TweakCellRot(indata<3:0>);
    outdata<55:52> = indata<7:4>;
    outdata<59:56> = TweakCellRot(indata<43:40>);
    outdata<63:60> = TweakCellRot(indata<39:36>);
    return outdata;
// HaveEnhancedPAC()
// =================
// Returns TRUE if support for EnhancedPAC is implemented, FALSE otherwise.
// Requires the base PAC extension plus an IMPLEMENTATION DEFINED choice.

boolean HaveEnhancedPAC()
    return ( HavePACExt() &&
             boolean IMPLEMENTATION_DEFINED "Has enhanced PAC functionality" );
// HaveEnhancedPAC2()
// ==================
// Returns TRUE if support for EnhancedPAC2 is implemented, FALSE otherwise.
// Mandatory from Armv8.6; optional (IMPLEMENTATION DEFINED) from Armv8.3.

boolean HaveEnhancedPAC2()
    return HasArchVersion(ARMv8p6) ||
           (HasArchVersion(ARMv8p3) &&
            boolean IMPLEMENTATION_DEFINED "Has enhanced PAC 2 functionality");
// HaveFPAC()
// ==========
// Returns TRUE if support for FPAC is implemented, FALSE otherwise.
// FPAC (faulting PAC) builds on EnhancedPAC2.

boolean HaveFPAC()
    return HaveEnhancedPAC2() && boolean IMPLEMENTATION_DEFINED "Has FPAC functionality";
// HaveFPACCombined()
// ==================
// Returns TRUE if support for FPACCombined is implemented, FALSE otherwise.
// Extends FPAC faulting to the combined (e.g. branch/load) authentication forms.

boolean HaveFPACCombined()
    return HaveFPAC() && boolean IMPLEMENTATION_DEFINED "Has FPAC Combined functionality";
// HavePACExt()
// ============
// Returns TRUE if support for the PAC extension is implemented, FALSE otherwise.
// Pointer authentication is architectural from Armv8.3.

boolean HavePACExt()
    return HasArchVersion(ARMv8p3);
// PtrHasUpperAndLowerAddRanges()
// ==============================
// Returns TRUE if the pointer has upper and lower address ranges, FALSE otherwise.
// True for the EL1&0 regime, and for the EL2&0 regime when E2H is set.

boolean PtrHasUpperAndLowerAddRanges()
    return PSTATE.EL == EL1 || PSTATE.EL == EL0 || (PSTATE.EL == EL2 && HCR_EL2.E2H == '1');
// Strip()
// =======
// Strip() returns a 64-bit value containing A, but replacing the pointer authentication
// code field bits with the extension of the address bits. This can apply to either
// instructions or data, where, as the use of tagged pointers is distinct, it might be
// handled differently.

bits(64) Strip(bits(64) A, boolean data)
    bits(64) original_ptr;
    bits(64) extfield;

    boolean tbi = EffectiveTBI(A, !data, PSTATE.EL) == '1';
    integer bottom_PAC_bit = CalculateBottomPACBit(A<55>);
    // The extension is a sign-extension of address bit[55].
    extfield = Replicate(A<55>, 64);

    // With TBI the top byte is preserved; otherwise bits[63:56] are also replaced.
    if tbi then
        original_ptr = A<63:56>:extfield< 56-bottom_PAC_bit-1:0>:A<bottom_PAC_bit-1:0>;
    else
        original_ptr = extfield< 64-bottom_PAC_bit-1:0>:A<bottom_PAC_bit-1:0>;

    return original_ptr;
// TrapPACUse()
// ============
// Used for the trapping of the pointer authentication functions by higher exception
// levels.

TrapPACUse(bits(2) target_el)
    // Traps can only target an implemented EL above EL0 that is not below the current EL.
    assert HaveEL(target_el) && target_el != EL0 && UInt(target_el) >= UInt(PSTATE.EL);

    bits(64) preferred_exception_return = ThisInstrAddr();
    ExceptionRecord exception;
    vect_offset = 0;
    exception = ExceptionSyndrome(Exception_PACTrap);
    AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);
// AArch64.ESBOperation()
// ======================
// Perform the AArch64 ESB operation, either for ESB executed in AArch64 state, or for
// ESB in AArch32 state when SError interrupts are routed to an Exception level using
// AArch64

AArch64.ESBOperation()
    // Determine the Exception level a physical SError would be taken to.
    route_to_el3 = HaveEL(EL3) && SCR_EL3.EA == '1';
    route_to_el2 = (EL2Enabled() && (HCR_EL2.TGE == '1' || HCR_EL2.AMO == '1'));
    target = if route_to_el3 then EL3 elsif route_to_el2 then EL2 else EL1;

    if target == EL1 then
        mask_active = PSTATE.EL IN {EL0, EL1};
    elsif HaveVirtHostExt() && target == EL2 && HCR_EL2.<E2H,TGE> == '11' then
        mask_active = PSTATE.EL IN {EL0, EL2};
    else
        mask_active = PSTATE.EL == target;

    // PSTATE.A masks the SError unless Double Fault at EL3 makes it non-maskable.
    mask_set = (PSTATE.A == '1' && (!HaveDoubleFaultExt() || SCR_EL3.EA == '0' ||
                PSTATE.EL != EL3 || SCR_EL3.NMEA == '0'));
    intdis = Halted() || ExternalDebugInterruptsDisabled(target);
    masked = (UInt(target) < UInt(PSTATE.EL)) || intdis || (mask_active && mask_set);

    // Check for a masked Physical SError pending that can be synchronized
    // by an Error synchronization event.
    if masked && IsSynchronizablePhysicalSErrorPending() then
        // This function might be called for an interworking case, and INTdis is masking
        // the SError interrupt.
        if ELUsingAArch32(S1TranslationRegime()) then
            syndrome32 = AArch32.PhysicalSErrorSyndrome();
            DISR = AArch32.ReportDeferredSError(syndrome32.AET, syndrome32.ExT);
        else
            implicit_esb = FALSE;
            syndrome64 = AArch64.PhysicalSErrorSyndrome(implicit_esb);
            DISR_EL1 = AArch64.ReportDeferredSError(syndrome64);
        ClearPendingPhysicalSError();               // Set ISR_EL1.A to 0

    return;
// Return the SError syndrome
// Declaration only: the definition is IMPLEMENTATION DEFINED and provided elsewhere.
bits(25) AArch64.PhysicalSErrorSyndrome(boolean implicit_esb);
// AArch64.ReportDeferredSError()
// ==============================
// Generate deferred SError syndrome

bits(64) AArch64.ReportDeferredSError(bits(25) syndrome)
    bits(64) target;
    target<31> = '1';                       // A
    target<24> = syndrome<24>;              // IDS
    target<23:0> = syndrome<23:0>;          // ISS
    // Bits not assigned above are left as the (zero) reset value of 'target'.
    return target;
// AArch64.vESBOperation()
// =======================
// Perform the AArch64 ESB operation for virtual SError interrupts, either for ESB
// executed in AArch64 state, or for ESB in AArch32 state with EL2 using AArch64 state

AArch64.vESBOperation()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();

    // If physical SError interrupts are routed to EL2, and TGE is not set, then a virtual
    // SError interrupt might be pending
    vSEI_enabled = HCR_EL2.TGE == '0' && HCR_EL2.AMO == '1';
    vSEI_pending = vSEI_enabled && HCR_EL2.VSE == '1';
    vintdis = Halted() || ExternalDebugInterruptsDisabled(EL1);
    vmasked = vintdis || PSTATE.A == '1';

    // Check for a masked virtual SError pending
    if vSEI_pending && vmasked then
        // This function might be called for the interworking case, and INTdis is masking
        // the virtual SError interrupt.
        if ELUsingAArch32(EL1) then
            VDISR = AArch32.ReportDeferredSError(VDFSR<15:14>, VDFSR<12>);
        else
            VDISR_EL2 = AArch64.ReportDeferredSError(VSESR_EL2<24:0>);
        HCR_EL2.VSE = '0';                  // Clear pending virtual SError

    return;
// AArch64.MaybeZeroRegisterUppers()
// =================================
// On taking an exception to AArch64 from AArch32, it is CONSTRAINED UNPREDICTABLE whether the top
// 32 bits of registers visible at any lower Exception level using AArch32 are set to zero.

AArch64.MaybeZeroRegisterUppers()
    assert UsingAArch32();          // Always called from AArch32 state before entering AArch64 state

    // Select which registers are visible at a lower AArch32 Exception level,
    // and hence are candidates for having their upper halves zeroed.
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then
        first = 0;  last = 14;  include_R15 = FALSE;
    elsif PSTATE.EL IN {EL0, EL1} && EL2Enabled() && !ELUsingAArch32(EL2) then
        first = 0;  last = 30;  include_R15 = FALSE;
    else
        first = 0;  last = 30;  include_R15 = TRUE;

    for n = first to last
        // The choice is made independently for each register.
        if (n != 15 || include_R15) && ConstrainUnpredictableBool(Unpredictable_ZEROUPPER) then
            _R[n]<63:32> = Zeros();

    return;
// AArch64.ResetGeneralRegisters()
// ===============================
// On reset, the general-purpose registers X0-X30 have UNKNOWN values.

AArch64.ResetGeneralRegisters()
    for i = 0 to 30
        X[i] = bits(64) UNKNOWN;
    return;
// AArch64.ResetSIMDFPRegisters()
// ==============================
// On reset, the SIMD&FP registers V0-V31 have UNKNOWN values.

AArch64.ResetSIMDFPRegisters()
    for i = 0 to 31
        V[i] = bits(128) UNKNOWN;
    return;
// AArch64.ResetSpecialRegisters()
// ===============================
// On reset, the special-purpose registers have UNKNOWN values, for each
// implemented Exception level and for AArch32 banked SPSRs if AArch32 is
// supported at EL1.

AArch64.ResetSpecialRegisters()
    // AArch64 special registers
    SP_EL0 = bits(64) UNKNOWN;
    SP_EL1 = bits(64) UNKNOWN;
    SPSR_EL1 = bits(64) UNKNOWN;
    ELR_EL1 = bits(64) UNKNOWN;
    if HaveEL(EL2) then
        SP_EL2 = bits(64) UNKNOWN;
        SPSR_EL2 = bits(64) UNKNOWN;
        ELR_EL2 = bits(64) UNKNOWN;
    if HaveEL(EL3) then
        SP_EL3 = bits(64) UNKNOWN;
        SPSR_EL3 = bits(64) UNKNOWN;
        ELR_EL3 = bits(64) UNKNOWN;

    // AArch32 special registers that are not architecturally mapped to AArch64 registers
    if HaveAArch32EL(EL1) then
        SPSR_fiq<31:0> = bits(32) UNKNOWN;
        SPSR_irq<31:0> = bits(32) UNKNOWN;
        SPSR_abt<31:0> = bits(32) UNKNOWN;
        SPSR_und<31:0> = bits(32) UNKNOWN;

    // External debug special registers
    DLR_EL0 = bits(64) UNKNOWN;
    DSPSR_EL0 = bits(64) UNKNOWN;

    return;
// PC - non-assignment form
// ========================
// Read program counter.

bits(64) PC[]
    return _PC;
// SP[] - assignment form
// ======================
// Write to stack pointer from either a 32-bit or a 64-bit value.
// PSTATE.SP == '0' selects SP_EL0 regardless of the current Exception level.

SP[] = bits(width) value
    assert width IN {32,64};
    if PSTATE.SP == '0' then
        SP_EL0 = ZeroExtend(value);
    else
        case PSTATE.EL of
            when EL0 SP_EL0 = ZeroExtend(value);
            when EL1 SP_EL1 = ZeroExtend(value);
            when EL2 SP_EL2 = ZeroExtend(value);
            when EL3 SP_EL3 = ZeroExtend(value);
    return;

// SP[] - non-assignment form
// ==========================
// Read stack pointer with implicit slice of 8, 16, 32 or 64 bits.

bits(width) SP[]
    assert width IN {8,16,32,64};
    if PSTATE.SP == '0' then
        return SP_EL0<width-1:0>;
    else
        case PSTATE.EL of
            when EL0 return SP_EL0<width-1:0>;
            when EL1 return SP_EL1<width-1:0>;
            when EL2 return SP_EL2<width-1:0>;
            when EL3 return SP_EL3<width-1:0>;
// V[] - assignment form
// =====================
// Write to SIMD&FP register with implicit extension from
// 8, 16, 32, 64 or 128 bits.
// The SIMD&FP registers are the low 128 bits of the SVE Z registers; whether
// a write zeroes the bits above the constrained vector length is
// CONSTRAINED UNPREDICTABLE.

V[integer n] = bits(width) value
    assert n >= 0 && n <= 31;
    assert width IN {8,16,32,64,128};
    integer vlen = if IsSVEEnabled(PSTATE.EL) then VL else 128;
    if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
        _Z[n] = ZeroExtend(value);
    else
        _Z[n]<vlen-1:0> = ZeroExtend(value);

// V[] - non-assignment form
// =========================
// Read from SIMD&FP register with implicit slice of 8, 16
// 32, 64 or 128 bits.

bits(width) V[integer n]
    assert n >= 0 && n <= 31;
    assert width IN {8,16,32,64,128};
    return _Z[n]<width-1:0>;
// Vpart[] - non-assignment form
// =============================
// Reads a 128-bit SIMD&FP register in up to two parts:
// part 0 returns the bottom 8, 16, 32 or 64 bits of a value held in the register;
// part 1 returns the top half of the bottom 64 bits or the top half of the 128-bit
// value held in the register.

bits(width) Vpart[integer n, integer part]
    assert n >= 0 && n <= 31;
    assert part IN {0, 1};
    if part == 0 then
        assert width < 128;
        return V[n];
    else
        assert width IN {32,64};
        bits(128) vreg = V[n];
        return vreg<(width * 2)-1:width>;

// Vpart[] - assignment form
// =========================
// Writes a 128-bit SIMD&FP register in up to two parts:
// part 0 zero extends a 8, 16, 32, or 64-bit value to fill the whole register;
// part 1 inserts a 64-bit value into the top half of the register.

Vpart[integer n, integer part] = bits(width) value
    assert n >= 0 && n <= 31;
    assert part IN {0, 1};
    if part == 0 then
        assert width < 128;
        V[n] = value;
    else
        assert width == 64;
        // Preserve the low 64 bits while replacing the high 64 bits.
        bits(64) vreg = V[n];
        V[n] = value<63:0> : vreg;
// X[] - assignment form
// =====================
// Write to general-purpose register from either a 32-bit or a 64-bit value.
// Register 31 is the zero register: writes are discarded.

X[integer n] = bits(width) value
    assert n >= 0 && n <= 31;
    assert width IN {32,64};
    if n != 31 then
        _R[n] = ZeroExtend(value);
    return;

// X[] - non-assignment form
// =========================
// Read from general-purpose register with implicit slice of 8, 16, 32 or 64 bits.
// Register 31 is the zero register: reads return zero.

bits(width) X[integer n]
    assert n >= 0 && n <= 31;
    assert width IN {8,16,32,64};
    if n != 31 then
        return _R[n]<width-1:0>;
    else
        return Zeros(width);
// AArch32.IsFPEnabled()
// =====================
// Returns TRUE if access to the SIMD&FP instructions or System registers are
// enabled at the target exception level in AArch32 state and FALSE otherwise.

boolean AArch32.IsFPEnabled(bits(2) el)
    // EL0 under an AArch64 EL1 follows the AArch64 enable rules.
    if el == EL0 && !ELUsingAArch32(EL1) then
        return AArch64.IsFPEnabled(el);

    if HaveEL(EL3) && ELUsingAArch32(EL3) && !IsSecure() then
        // Check if access disabled in NSACR
        if NSACR.cp10 == '0' then return FALSE;

    if el IN {EL0, EL1} then
        // Check if access disabled in CPACR
        case CPACR.cp10 of
            when '00' disabled = TRUE;
            when '01' disabled = el == EL0;
            when '10' disabled = ConstrainUnpredictableBool(Unpredictable_RESCPACR);
            when '11' disabled = FALSE;
        if disabled then return FALSE;

    if el IN {EL0, EL1, EL2} && EL2Enabled() then
        if !ELUsingAArch32(EL2) then
            return AArch64.IsFPEnabled(EL2);
        if HCPTR.TCP10 == '1' then return FALSE;

    if HaveEL(EL3) && !ELUsingAArch32(EL3) then
        // Check if access disabled in CPTR_EL3
        if CPTR_EL3.TFP == '1' then return FALSE;

    return TRUE;
// AArch64.IsFPEnabled()
// =====================
// Returns TRUE if access to the SIMD&FP instructions or System registers are
// enabled at the target exception level in AArch64 state and FALSE otherwise.

boolean AArch64.IsFPEnabled(bits(2) el)
    // Check if access disabled in CPACR_EL1
    if el IN {EL0, EL1} && !IsInHost() then
        // Check FP&SIMD at EL0/EL1
        case CPACR_EL1.FPEN of
            when 'x0' disabled = TRUE;
            when '01' disabled = el == EL0;
            when '11' disabled = FALSE;
        if disabled then return FALSE;

    // Check if access disabled in CPTR_EL2
    if el IN {EL0, EL1, EL2} && EL2Enabled() then
        if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
            // With E2H the control is an FPEN field rather than a trap bit.
            case CPTR_EL2.FPEN of
                when 'x0' disabled = TRUE;
                when '01' disabled = el == EL0 && HCR_EL2.TGE == '1';
                when '11' disabled = FALSE;
            if disabled then return FALSE;
        else
            if CPTR_EL2.TFP == '1' then return FALSE;

    // Check if access disabled in CPTR_EL3
    if HaveEL(EL3) then
        if CPTR_EL3.TFP == '1' then return FALSE;

    return TRUE;
// AnyActiveElement()
// ==================
// Return TRUE if there is at least one active element in mask. Otherwise,
// return FALSE.
// (Reconstructed: this span was corrupted by interleaved diff output mixing
// AnyActiveElement with CeilPow2.)

boolean AnyActiveElement(bits(N) mask, integer esize)
    // LastActiveElement returns -1 when no element of the predicate is active.
    return LastActiveElement(mask, esize) >= 0;

// CeilPow2()
// ==========
// For a positive integer X, return the smallest power of 2 >= X

integer CeilPow2(integer x)
    if x == 0 then return 0;
    if x == 1 then return 2;    // architectural special case: CeilPow2(1) is defined as 2
    return FloorPow2(x - 1) * 2;
// CheckSVEEnabled()
// =================
// Checks for traps on SVE instructions and instructions that
// access SVE System registers.
// (Reconstructed: this span was corrupted by interleaved diff output; the
// missing EL0 in "PSTATE.EL IN {EL0, EL1}" is restored from the duplicate copy.)

CheckSVEEnabled()
    // Check if access disabled in CPACR_EL1
    if PSTATE.EL IN {EL0, EL1} && !IsInHost() then
        // Check SVE at EL0/EL1
        case CPACR_EL1.ZEN of
            when 'x0' disabled = TRUE;
            when '01' disabled = PSTATE.EL == EL0;
            when '11' disabled = FALSE;
        if disabled then SVEAccessTrap(EL1);
        // Check SIMD&FP at EL0/EL1
        case CPACR_EL1.FPEN of
            when 'x0' disabled = TRUE;
            when '01' disabled = PSTATE.EL == EL0;
            when '11' disabled = FALSE;
        if disabled then AArch64.AdvSIMDFPAccessTrap(EL1);

    // Check if access disabled in CPTR_EL2
    if PSTATE.EL IN {EL0, EL1, EL2} && EL2Enabled() then
        if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
            // Check SVE at EL2
            case CPTR_EL2.ZEN of
                when 'x0' disabled = TRUE;
                when '01' disabled = PSTATE.EL == EL0 && HCR_EL2.TGE == '1';
                when '11' disabled = FALSE;
            if disabled then SVEAccessTrap(EL2);
            // Check SIMD&FP at EL2
            case CPTR_EL2.FPEN of
                when 'x0' disabled = TRUE;
                when '01' disabled = PSTATE.EL == EL0 && HCR_EL2.TGE == '1';
                when '11' disabled = FALSE;
            if disabled then AArch64.AdvSIMDFPAccessTrap(EL2);
        else
            if CPTR_EL2.TZ == '1' then SVEAccessTrap(EL2);
            if CPTR_EL2.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL2);

    // Check if access disabled in CPTR_EL3
    if HaveEL(EL3) then
        if CPTR_EL3.EZ == '0' then SVEAccessTrap(EL3);
        if CPTR_EL3.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL3);
// DecodePredCount()
// =================
// Decode an SVE predicate-count pattern into a number of elements, given the
// number of elements (VL DIV esize) available in a vector.
// (Reconstructed: this span was corrupted by interleaved diff output mixing a
// duplicate CheckSVEEnabled with DecodePredCount.)

integer DecodePredCount(bits(5) pattern, integer esize)
    integer elements = VL DIV esize;
    integer numElem;
    case pattern of
        when '00000' numElem = FloorPow2(elements);                 // POW2
        when '00001' numElem = if elements >= 1 then 1 else 0;      // VL1
        when '00010' numElem = if elements >= 2 then 2 else 0;      // VL2
        when '00011' numElem = if elements >= 3 then 3 else 0;      // VL3
        when '00100' numElem = if elements >= 4 then 4 else 0;      // VL4
        when '00101' numElem = if elements >= 5 then 5 else 0;      // VL5
        when '00110' numElem = if elements >= 6 then 6 else 0;      // VL6
        when '00111' numElem = if elements >= 7 then 7 else 0;      // VL7
        when '01000' numElem = if elements >= 8 then 8 else 0;      // VL8
        when '01001' numElem = if elements >= 16 then 16 else 0;    // VL16
        when '01010' numElem = if elements >= 32 then 32 else 0;    // VL32
        when '01011' numElem = if elements >= 64 then 64 else 0;    // VL64
        when '01100' numElem = if elements >= 128 then 128 else 0;  // VL128
        when '01101' numElem = if elements >= 256 then 256 else 0;  // VL256
        when '11101' numElem = elements - (elements MOD 4);         // MUL4
        when '11110' numElem = elements - (elements MOD 3);         // MUL3
        when '11111' numElem = elements;                            // ALL
        otherwise    numElem = 0;
    return numElem;
// DecodePredCount()
// =================
// ElemFFR[] - non-assignment form
// ===============================
integerbit DecodePredCount(bits(5) pattern, integer esize)
integer elements =ElemFFR[integer e, integer esize]
return VLElemP DIV esize;
integer numElem;
case pattern of
when '00000' numElem =[_FFR, e, esize];
// ElemFFR[] - assignment form
// =========================== ElemFFR[integer e, integer esize] = bit value
integer psize = esize DIV 8;
integer n = e * psize;
assert n >= 0 && (n + psize) <= PL;
_FFR<n+psize-1:n> = ZeroExtendFloorPow2(elements);
when '00001' numElem = if elements >= 1 then 1 else 0;
when '00010' numElem = if elements >= 2 then 2 else 0;
when '00011' numElem = if elements >= 3 then 3 else 0;
when '00100' numElem = if elements >= 4 then 4 else 0;
when '00101' numElem = if elements >= 5 then 5 else 0;
when '00110' numElem = if elements >= 6 then 6 else 0;
when '00111' numElem = if elements >= 7 then 7 else 0;
when '01000' numElem = if elements >= 8 then 8 else 0;
when '01001' numElem = if elements >= 16 then 16 else 0;
when '01010' numElem = if elements >= 32 then 32 else 0;
when '01011' numElem = if elements >= 64 then 64 else 0;
when '01100' numElem = if elements >= 128 then 128 else 0;
when '01101' numElem = if elements >= 256 then 256 else 0;
when '11101' numElem = elements - (elements MOD 4);
when '11110' numElem = elements - (elements MOD 3);
when '11111' numElem = elements;
otherwise numElem = 0;
return numElem;(value, psize);
return;
// ElemFFR[] - non-assignment form
// ===============================
// ElemP[] - non-assignment form
// =============================
bit ElemFFR[integer e, integer esize]
returnElemP[bits(N) pred, integer e, integer esize]
integer n = e * (esize DIV 8);
assert n >= 0 && n < N;
return pred<n>;
// ElemP[] - assignment form
// ========================= ElemP[_FFR, e, esize];
// ElemFFR[] - assignment form
// ===========================
ElemFFR[integer e, integer esize] = bit value
integer psize = esize DIV 8;
integer n = e * psize;
assert n >= 0 && (n + psize) <= PL;
_FFR<n+psize-1:n> =ElemP[bits(N) &pred, integer e, integer esize] = bit value
integer psize = esize DIV 8;
integer n = e * psize;
assert n >= 0 && (n + psize) <= N;
pred<n+psize-1:n> = ZeroExtend(value, psize);
return;
// ElemP[] - non-assignment form
// =============================
// FFR[] - non-assignment form
// ===========================
bitbits(width) ElemP[bits(N) pred, integer e, integer esize]
integer n = e * (esize DIV 8);
assert n >= 0 && n < N;
return pred<n>;
// ElemP[] - assignment form
// =========================FFR[]
assert width ==
PL;
return _FFR<width-1:0>;
// FFR[] - assignment form
// =======================
FFR[] = bits(width) value
assert width == PL;
if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPERElemP[bits(N) &pred, integer e, integer esize] = bit value
integer psize = esize DIV 8;
integer n = e * psize;
assert n >= 0 && (n + psize) <= N;
pred<n+psize-1:n> =) then
_FFR = ZeroExtend(value, psize);
return;(value);
else
_FFR<width-1:0> = value;
// FFR[] - non-assignment form
// ===========================
// FPCompareNE()
// =============
bits(width)boolean FFR[]
assert width ==FPCompareNE(bits(N) op1, bits(N) op2, PLFPCRType;
return _FFR<width-1:0>;
// FFR[] - assignment form
// =======================fpcr)
assert N IN {16,32,64};
(type1,sign1,value1) =
FFR[] = bits(width) value
assert width ==(op1, fpcr);
(type2,sign2,value2) = PLFPUnpack;
if(op2, fpcr);
op1_nan = type1 IN { ConstrainUnpredictableBoolFPType_SNaN(,Unpredictable_SVEZEROUPPERFPType_QNaN) then
_FFR =};
op2_nan = type2 IN { , FPType_QNaN};
if op1_nan || op2_nan then
result = TRUE;
if type1 == FPType_SNaN || type2 == FPType_SNaN then
FPProcessException(FPExc_InvalidOp, fpcr);
else // All non-NaN cases can be evaluated on the values produced by FPUnpack()
result = (value1 != value2);
FPProcessDenormsZeroExtendFPType_SNaN(value);
else
_FFR<width-1:0> = value;(type1, type2, N, fpcr);
return result;
// FPCompareNE()
// FPCompareUN()
// =============
boolean FPCompareNE(bits(N) op1, bits(N) op2,FPCompareUN(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
op1_nan = type1 IN {
if type1 ==FPType_SNaN,|| type2 == FPType_QNaNFPType_SNaN};
op2_nan = type2 IN {thenFPProcessException(FPExc_InvalidOp, fpcr);
result = type1 IN {FPType_SNaN, FPType_QNaN};
if op1_nan || op2_nan then
result = TRUE;
if type1 ==} || type2 IN { FPType_SNaN || type2 ==, FPType_SNaNFPType_QNaN then
FPProcessException(FPExc_InvalidOp, fpcr);
else // All non-NaN cases can be evaluated on the values produced by FPUnpack()
result = (value1 != value2);};
if !result then
FPProcessDenorms(type1, type2, N, fpcr);
return result;
// FPCompareUN()
// =============
// FPConvertSVE()
// ==============
booleanbits(M) FPCompareUN(bits(N) op1, bits(N) op2,FPConvertSVE(bits(N) op, FPCRType fpcr)
assert N IN {16,32,64};
(type1,sign1,value1) =fpcr, FPUnpackFPRounding(op1, fpcr);
(type2,sign2,value2) =rounding)
fpcr.AHP = '0';
return FPUnpackFPConvert(op2, fpcr);
(op, fpcr, rounding);
if type1 ==// FPConvertSVE()
// ==============
bits(M) FPType_SNaN || type2 ==FPConvertSVE(bits(N) op, FPType_SNaNFPCRType thenfpcr)
fpcr.AHP = '0';
return
FPProcessExceptionFPConvert((op, fpcr,FPExc_InvalidOpFPRoundingMode, fpcr);
result = type1 IN {FPType_SNaN, FPType_QNaN} || type2 IN {FPType_SNaN, FPType_QNaN};
if !result then
FPProcessDenorms(type1, type2, N, fpcr);
return result;(fpcr));
// FPConvertSVE()
// ==============
// FPExpA()
// ========
bits(M)bits(N) FPConvertSVE(bits(N) op,FPExpA(bits(N) op)
assert N IN {16,32,64};
bits(N) result;
bits(N) coeff;
integer idx = if N == 16 then FPCRTypeUInt fpcr,(op<4:0>) else FPRoundingUInt rounding)
fpcr.AHP = '0';
return(op<5:0>);
coeff = FPConvertFPExpCoefficient(op, fpcr, rounding);
// FPConvertSVE()
// ==============
bits(M) FPConvertSVE(bits(N) op, FPCRType fpcr)
fpcr.AHP = '0';
return FPConvert(op, fpcr, FPRoundingMode(fpcr));[idx];
if N == 16 then
result<15:0> = '0':op<9:5>:coeff<9:0>;
elsif N == 32 then
result<31:0> = '0':op<13:6>:coeff<22:0>;
else // N == 64
result<63:0> = '0':op<16:6>:coeff<51:0>;
return result;
// FPExpA()
// ========
// FPExpCoefficient()
// ==================
bits(N) FPExpA(bits(N) op)
FPExpCoefficient[integer index]
assert N IN {16,32,64};
bits(N) result;
bits(N) coeff;
integer idx = if N == 16 then integer result;
if N == 16 then
case index of
when 0 result = 0x0000;
when 1 result = 0x0016;
when 2 result = 0x002d;
when 3 result = 0x0045;
when 4 result = 0x005d;
when 5 result = 0x0075;
when 6 result = 0x008e;
when 7 result = 0x00a8;
when 8 result = 0x00c2;
when 9 result = 0x00dc;
when 10 result = 0x00f8;
when 11 result = 0x0114;
when 12 result = 0x0130;
when 13 result = 0x014d;
when 14 result = 0x016b;
when 15 result = 0x0189;
when 16 result = 0x01a8;
when 17 result = 0x01c8;
when 18 result = 0x01e8;
when 19 result = 0x0209;
when 20 result = 0x022b;
when 21 result = 0x024e;
when 22 result = 0x0271;
when 23 result = 0x0295;
when 24 result = 0x02ba;
when 25 result = 0x02e0;
when 26 result = 0x0306;
when 27 result = 0x032e;
when 28 result = 0x0356;
when 29 result = 0x037f;
when 30 result = 0x03a9;
when 31 result = 0x03d4;
elsif N == 32 then
case index of
when 0 result = 0x000000;
when 1 result = 0x0164d2;
when 2 result = 0x02cd87;
when 3 result = 0x043a29;
when 4 result = 0x05aac3;
when 5 result = 0x071f62;
when 6 result = 0x08980f;
when 7 result = 0x0a14d5;
when 8 result = 0x0b95c2;
when 9 result = 0x0d1adf;
when 10 result = 0x0ea43a;
when 11 result = 0x1031dc;
when 12 result = 0x11c3d3;
when 13 result = 0x135a2b;
when 14 result = 0x14f4f0;
when 15 result = 0x16942d;
when 16 result = 0x1837f0;
when 17 result = 0x19e046;
when 18 result = 0x1b8d3a;
when 19 result = 0x1d3eda;
when 20 result = 0x1ef532;
when 21 result = 0x20b051;
when 22 result = 0x227043;
when 23 result = 0x243516;
when 24 result = 0x25fed7;
when 25 result = 0x27cd94;
when 26 result = 0x29a15b;
when 27 result = 0x2b7a3a;
when 28 result = 0x2d583f;
when 29 result = 0x2f3b79;
when 30 result = 0x3123f6;
when 31 result = 0x3311c4;
when 32 result = 0x3504f3;
when 33 result = 0x36fd92;
when 34 result = 0x38fbaf;
when 35 result = 0x3aff5b;
when 36 result = 0x3d08a4;
when 37 result = 0x3f179a;
when 38 result = 0x412c4d;
when 39 result = 0x4346cd;
when 40 result = 0x45672a;
when 41 result = 0x478d75;
when 42 result = 0x49b9be;
when 43 result = 0x4bec15;
when 44 result = 0x4e248c;
when 45 result = 0x506334;
when 46 result = 0x52a81e;
when 47 result = 0x54f35b;
when 48 result = 0x5744fd;
when 49 result = 0x599d16;
when 50 result = 0x5bfbb8;
when 51 result = 0x5e60f5;
when 52 result = 0x60ccdf;
when 53 result = 0x633f89;
when 54 result = 0x65b907;
when 55 result = 0x68396a;
when 56 result = 0x6ac0c7;
when 57 result = 0x6d4f30;
when 58 result = 0x6fe4ba;
when 59 result = 0x728177;
when 60 result = 0x75257d;
when 61 result = 0x77d0df;
when 62 result = 0x7a83b3;
when 63 result = 0x7d3e0c;
else // N == 64
case index of
when 0 result = 0x0000000000000;
when 1 result = 0x02C9A3E778061;
when 2 result = 0x059B0D3158574;
when 3 result = 0x0874518759BC8;
when 4 result = 0x0B5586CF9890F;
when 5 result = 0x0E3EC32D3D1A2;
when 6 result = 0x11301D0125B51;
when 7 result = 0x1429AAEA92DE0;
when 8 result = 0x172B83C7D517B;
when 9 result = 0x1A35BEB6FCB75;
when 10 result = 0x1D4873168B9AA;
when 11 result = 0x2063B88628CD6;
when 12 result = 0x2387A6E756238;
when 13 result = 0x26B4565E27CDD;
when 14 result = 0x29E9DF51FDEE1;
when 15 result = 0x2D285A6E4030B;
when 16 result = 0x306FE0A31B715;
when 17 result = 0x33C08B26416FF;
when 18 result = 0x371A7373AA9CB;
when 19 result = 0x3A7DB34E59FF7;
when 20 result = 0x3DEA64C123422;
when 21 result = 0x4160A21F72E2A;
when 22 result = 0x44E086061892D;
when 23 result = 0x486A2B5C13CD0;
when 24 result = 0x4BFDAD5362A27;
when 25 result = 0x4F9B2769D2CA7;
when 26 result = 0x5342B569D4F82;
when 27 result = 0x56F4736B527DA;
when 28 result = 0x5AB07DD485429;
when 29 result = 0x5E76F15AD2148;
when 30 result = 0x6247EB03A5585;
when 31 result = 0x6623882552225;
when 32 result = 0x6A09E667F3BCD;
when 33 result = 0x6DFB23C651A2F;
when 34 result = 0x71F75E8EC5F74;
when 35 result = 0x75FEB564267C9;
when 36 result = 0x7A11473EB0187;
when 37 result = 0x7E2F336CF4E62;
when 38 result = 0x82589994CCE13;
when 39 result = 0x868D99B4492ED;
when 40 result = 0x8ACE5422AA0DB;
when 41 result = 0x8F1AE99157736;
when 42 result = 0x93737B0CDC5E5;
when 43 result = 0x97D829FDE4E50;
when 44 result = 0x9C49182A3F090;
when 45 result = 0xA0C667B5DE565;
when 46 result = 0xA5503B23E255D;
when 47 result = 0xA9E6B5579FDBF;
when 48 result = 0xAE89F995AD3AD;
when 49 result = 0xB33A2B84F15FB;
when 50 result = 0xB7F76F2FB5E47;
when 51 result = 0xBCC1E904BC1D2;
when 52 result = 0xC199BDD85529C;
when 53 result = 0xC67F12E57D14B;
when 54 result = 0xCB720DCEF9069;
when 55 result = 0xD072D4A07897C;
when 56 result = 0xD5818DCFBA487;
when 57 result = 0xDA9E603DB3285;
when 58 result = 0xDFC97337B9B5F;
when 59 result = 0xE502EE78B3FF6;
when 60 result = 0xEA4AFA2A490DA;
when 61 result = 0xEFA1BEE615A27;
when 62 result = 0xF50765B6E4540;
when 63 result = 0xFA7C1819E90D8;
return result<N-1:0>; UInt(op<4:0>) else UInt(op<5:0>);
coeff = FPExpCoefficient[idx];
if N == 16 then
result<15:0> = '0':op<9:5>:coeff<9:0>;
elsif N == 32 then
result<31:0> = '0':op<13:6>:coeff<22:0>;
else // N == 64
result<63:0> = '0':op<16:6>:coeff<51:0>;
return result;
// FPExpCoefficient()
// ==================
// FPMinNormal()
// =============
bits(N) FPExpCoefficient[integer index]
FPMinNormal(bit sign)
assert N IN {16,32,64};
integer result;
if N == 16 then
case index of
when 0 result = 0x0000;
when 1 result = 0x0016;
when 2 result = 0x002d;
when 3 result = 0x0045;
when 4 result = 0x005d;
when 5 result = 0x0075;
when 6 result = 0x008e;
when 7 result = 0x00a8;
when 8 result = 0x00c2;
when 9 result = 0x00dc;
when 10 result = 0x00f8;
when 11 result = 0x0114;
when 12 result = 0x0130;
when 13 result = 0x014d;
when 14 result = 0x016b;
when 15 result = 0x0189;
when 16 result = 0x01a8;
when 17 result = 0x01c8;
when 18 result = 0x01e8;
when 19 result = 0x0209;
when 20 result = 0x022b;
when 21 result = 0x024e;
when 22 result = 0x0271;
when 23 result = 0x0295;
when 24 result = 0x02ba;
when 25 result = 0x02e0;
when 26 result = 0x0306;
when 27 result = 0x032e;
when 28 result = 0x0356;
when 29 result = 0x037f;
when 30 result = 0x03a9;
when 31 result = 0x03d4;
elsif N == 32 then
case index of
when 0 result = 0x000000;
when 1 result = 0x0164d2;
when 2 result = 0x02cd87;
when 3 result = 0x043a29;
when 4 result = 0x05aac3;
when 5 result = 0x071f62;
when 6 result = 0x08980f;
when 7 result = 0x0a14d5;
when 8 result = 0x0b95c2;
when 9 result = 0x0d1adf;
when 10 result = 0x0ea43a;
when 11 result = 0x1031dc;
when 12 result = 0x11c3d3;
when 13 result = 0x135a2b;
when 14 result = 0x14f4f0;
when 15 result = 0x16942d;
when 16 result = 0x1837f0;
when 17 result = 0x19e046;
when 18 result = 0x1b8d3a;
when 19 result = 0x1d3eda;
when 20 result = 0x1ef532;
when 21 result = 0x20b051;
when 22 result = 0x227043;
when 23 result = 0x243516;
when 24 result = 0x25fed7;
when 25 result = 0x27cd94;
when 26 result = 0x29a15b;
when 27 result = 0x2b7a3a;
when 28 result = 0x2d583f;
when 29 result = 0x2f3b79;
when 30 result = 0x3123f6;
when 31 result = 0x3311c4;
when 32 result = 0x3504f3;
when 33 result = 0x36fd92;
when 34 result = 0x38fbaf;
when 35 result = 0x3aff5b;
when 36 result = 0x3d08a4;
when 37 result = 0x3f179a;
when 38 result = 0x412c4d;
when 39 result = 0x4346cd;
when 40 result = 0x45672a;
when 41 result = 0x478d75;
when 42 result = 0x49b9be;
when 43 result = 0x4bec15;
when 44 result = 0x4e248c;
when 45 result = 0x506334;
when 46 result = 0x52a81e;
when 47 result = 0x54f35b;
when 48 result = 0x5744fd;
when 49 result = 0x599d16;
when 50 result = 0x5bfbb8;
when 51 result = 0x5e60f5;
when 52 result = 0x60ccdf;
when 53 result = 0x633f89;
when 54 result = 0x65b907;
when 55 result = 0x68396a;
when 56 result = 0x6ac0c7;
when 57 result = 0x6d4f30;
when 58 result = 0x6fe4ba;
when 59 result = 0x728177;
when 60 result = 0x75257d;
when 61 result = 0x77d0df;
when 62 result = 0x7a83b3;
when 63 result = 0x7d3e0c;
else // N == 64
case index of
when 0 result = 0x0000000000000;
when 1 result = 0x02C9A3E778061;
when 2 result = 0x059B0D3158574;
when 3 result = 0x0874518759BC8;
when 4 result = 0x0B5586CF9890F;
when 5 result = 0x0E3EC32D3D1A2;
when 6 result = 0x11301D0125B51;
when 7 result = 0x1429AAEA92DE0;
when 8 result = 0x172B83C7D517B;
when 9 result = 0x1A35BEB6FCB75;
when 10 result = 0x1D4873168B9AA;
when 11 result = 0x2063B88628CD6;
when 12 result = 0x2387A6E756238;
when 13 result = 0x26B4565E27CDD;
when 14 result = 0x29E9DF51FDEE1;
when 15 result = 0x2D285A6E4030B;
when 16 result = 0x306FE0A31B715;
when 17 result = 0x33C08B26416FF;
when 18 result = 0x371A7373AA9CB;
when 19 result = 0x3A7DB34E59FF7;
when 20 result = 0x3DEA64C123422;
when 21 result = 0x4160A21F72E2A;
when 22 result = 0x44E086061892D;
when 23 result = 0x486A2B5C13CD0;
when 24 result = 0x4BFDAD5362A27;
when 25 result = 0x4F9B2769D2CA7;
when 26 result = 0x5342B569D4F82;
when 27 result = 0x56F4736B527DA;
when 28 result = 0x5AB07DD485429;
when 29 result = 0x5E76F15AD2148;
when 30 result = 0x6247EB03A5585;
when 31 result = 0x6623882552225;
when 32 result = 0x6A09E667F3BCD;
when 33 result = 0x6DFB23C651A2F;
when 34 result = 0x71F75E8EC5F74;
when 35 result = 0x75FEB564267C9;
when 36 result = 0x7A11473EB0187;
when 37 result = 0x7E2F336CF4E62;
when 38 result = 0x82589994CCE13;
when 39 result = 0x868D99B4492ED;
when 40 result = 0x8ACE5422AA0DB;
when 41 result = 0x8F1AE99157736;
when 42 result = 0x93737B0CDC5E5;
when 43 result = 0x97D829FDE4E50;
when 44 result = 0x9C49182A3F090;
when 45 result = 0xA0C667B5DE565;
when 46 result = 0xA5503B23E255D;
when 47 result = 0xA9E6B5579FDBF;
when 48 result = 0xAE89F995AD3AD;
when 49 result = 0xB33A2B84F15FB;
when 50 result = 0xB7F76F2FB5E47;
when 51 result = 0xBCC1E904BC1D2;
when 52 result = 0xC199BDD85529C;
when 53 result = 0xC67F12E57D14B;
when 54 result = 0xCB720DCEF9069;
when 55 result = 0xD072D4A07897C;
when 56 result = 0xD5818DCFBA487;
when 57 result = 0xDA9E603DB3285;
when 58 result = 0xDFC97337B9B5F;
when 59 result = 0xE502EE78B3FF6;
when 60 result = 0xEA4AFA2A490DA;
when 61 result = 0xEFA1BEE615A27;
when 62 result = 0xF50765B6E4540;
when 63 result = 0xFA7C1819E90D8;
return result<N-1:0>; constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
exp =Zeros(E-1):'1';
frac = Zeros(F);
return sign : exp : frac;
// FPMinNormal()
// =============
// FPOne()
// =======
bits(N) FPMinNormal(bit sign)
FPOne(bit sign)
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
exp = exp = '0': ZerosOnes(E-1):'1';
(E-1);
frac = Zeros(F);
return sign : exp : frac;
// FPOne()
// =======
// FPPointFive()
// =============
bits(N) FPOne(bit sign)
FPPointFive(bit sign)
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
exp = '0':Ones(E-1);
(E-2):'0';
frac = Zeros(F);
return sign : exp : frac;
// FPPointFive()
// =============
// FPProcess()
// ===========
bits(N) FPPointFive(bit sign)
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
exp = '0':FPProcess(bits(N) input)
bits(N) result;
assert N IN {16,32,64};OnesFPCRType(E-2):'0';
frac =fpcr = FPCR[];
(fptype,sign,value) = (input, fpcr);
if fptype == FPType_SNaN || fptype == FPType_QNaN then
result = FPProcessNaN(fptype, input, fpcr);
elsif fptype == FPType_Infinity then
result = FPInfinity(sign);
elsif fptype == FPType_Zero then
result = FPZero(sign);
else
result = FPRound(value, fpcr);
FPProcessDenormZerosFPUnpack(F);
return sign : exp : frac;(fptype, N, fpcr);
return result;
// FPProcess()
// ===========
// FPScale()
// =========
bits(N) FPProcess(bits(N) input)
bits(N) result;
assert N IN {16,32,64};FPScale(bits (N) op, integer scale,
FPCRType fpcr = FPCR[];
fpcr)
assert N IN {16,32,64};
(fptype,sign,value) = FPUnpack(input, fpcr);
(op, fpcr);
if fptype == FPType_SNaN || fptype == FPType_QNaN then
result = FPProcessNaN(fptype, input, fpcr);
(fptype, op, fpcr);
elsif fptype == FPType_Zero then
result = FPZero(sign);
elsif fptype == FPType_Infinity then
result = FPInfinity(sign);
elsif fptype == FPType_Zero then
result = FPZero(sign);
else
result = FPRound(value, fpcr);(value * (2.0^scale), fpcr);
FPProcessDenorm(fptype, N, fpcr);
return result;
// FPScale()
// =========
// FPTrigMAdd()
// ============
bits(N) FPScale(bits (N) op, integer scale,FPTrigMAdd(integer x, bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
(fptype,sign,value) = assert x >= 0;
assert x < 8;
bits(N) coeff;
if op2<N-1> == '1' then
x = x + 8;
coeff = FPTrigMAddCoefficient[x];
boolean altfp = HaveAltFP() && fpcr.AH == '1';
if altfp then
(fptype,-,-) = FPUnpack(op, fpcr);
if fptype ==(op2, fpcr, FALSE);
if !(fptype IN { FPType_SNaN || fptype ==, FPType_QNaN then
result =}) then
op2<N-1> = '0';
else
op2<N-1> = '0';
result = FPProcessNaNFPMulAdd(fptype, op, fpcr);
elsif fptype == FPType_Zero then
result = FPZero(sign);
elsif fptype == FPType_Infinity then
result = FPInfinity(sign);
else
result = FPRound(value * (2.0^scale), fpcr);
FPProcessDenorm(fptype, N, fpcr);
(coeff, op1, op2, fpcr);
return result;
// FPTrigMAdd()
// ============
// FPTrigMAddCoefficient()
// =======================
bits(N) FPTrigMAdd(integer x, bits(N) op1, bits(N) op2,FPTrigMAddCoefficient[integer index]
assert N IN {16,32,64};
integer result;
if N == 16 then
case index of
when 0 result = 0x3c00;
when 1 result = 0xb155;
when 2 result = 0x2030;
when 3 result = 0x0000;
when 4 result = 0x0000;
when 5 result = 0x0000;
when 6 result = 0x0000;
when 7 result = 0x0000;
when 8 result = 0x3c00;
when 9 result = 0xb800;
when 10 result = 0x293a;
when 11 result = 0x0000;
when 12 result = 0x0000;
when 13 result = 0x0000;
when 14 result = 0x0000;
when 15 result = 0x0000;
elsif N == 32 then
case index of
when 0 result = 0x3f800000;
when 1 result = 0xbe2aaaab;
when 2 result = 0x3c088886;
when 3 result = 0xb95008b9;
when 4 result = 0x36369d6d;
when 5 result = 0x00000000;
when 6 result = 0x00000000;
when 7 result = 0x00000000;
when 8 result = 0x3f800000;
when 9 result = 0xbf000000;
when 10 result = 0x3d2aaaa6;
when 11 result = 0xbab60705;
when 12 result = 0x37cd37cc;
when 13 result = 0x00000000;
when 14 result = 0x00000000;
when 15 result = 0x00000000;
else // N == 64
case index of
when 0 result = 0x3ff0000000000000;
when 1 result = 0xbfc5555555555543;
when 2 result = 0x3f8111111110f30c;
when 3 result = 0xbf2a01a019b92fc6;
when 4 result = 0x3ec71de351f3d22b;
when 5 result = 0xbe5ae5e2b60f7b91;
when 6 result = 0x3de5d8408868552f;
when 7 result = 0x0000000000000000;
when 8 result = 0x3ff0000000000000;
when 9 result = 0xbfe0000000000000;
when 10 result = 0x3fa5555555555536;
when 11 result = 0xbf56c16c16c13a0b;
when 12 result = 0x3efa01a019b1e8d8;
when 13 result = 0xbe927e4f7282f468;
when 14 result = 0x3e21ee96d2641b13;
when 15 result = 0xbda8f76380fbb401;
return result<N-1:0>; FPCRType fpcr)
assert N IN {16,32,64};
assert x >= 0;
assert x < 8;
bits(N) coeff;
if op2<N-1> == '1' then
x = x + 8;
coeff = FPTrigMAddCoefficient[x];
op2 = FPAbs(op2);
result = FPMulAdd(coeff, op1, op2, fpcr);
return result;
// FPTrigMAddCoefficient()
// =======================
// FPTrigSMul()
// ============
bits(N) FPTrigMAddCoefficient[integer index]
assert N IN {16,32,64};
integer result;
if N == 16 then
case index of
when 0 result = 0x3c00;
when 1 result = 0xb155;
when 2 result = 0x2030;
when 3 result = 0x0000;
when 4 result = 0x0000;
when 5 result = 0x0000;
when 6 result = 0x0000;
when 7 result = 0x0000;
when 8 result = 0x3c00;
when 9 result = 0xb800;
when 10 result = 0x293a;
when 11 result = 0x0000;
when 12 result = 0x0000;
when 13 result = 0x0000;
when 14 result = 0x0000;
when 15 result = 0x0000;
elsif N == 32 then
case index of
when 0 result = 0x3f800000;
when 1 result = 0xbe2aaaab;
when 2 result = 0x3c088886;
when 3 result = 0xb95008b9;
when 4 result = 0x36369d6d;
when 5 result = 0x00000000;
when 6 result = 0x00000000;
when 7 result = 0x00000000;
when 8 result = 0x3f800000;
when 9 result = 0xbf000000;
when 10 result = 0x3d2aaaa6;
when 11 result = 0xbab60705;
when 12 result = 0x37cd37cc;
when 13 result = 0x00000000;
when 14 result = 0x00000000;
when 15 result = 0x00000000;
else // N == 64
case index of
when 0 result = 0x3ff0000000000000;
when 1 result = 0xbfc5555555555543;
when 2 result = 0x3f8111111110f30c;
when 3 result = 0xbf2a01a019b92fc6;
when 4 result = 0x3ec71de351f3d22b;
when 5 result = 0xbe5ae5e2b60f7b91;
when 6 result = 0x3de5d8408868552f;
when 7 result = 0x0000000000000000;
when 8 result = 0x3ff0000000000000;
when 9 result = 0xbfe0000000000000;
when 10 result = 0x3fa5555555555536;
when 11 result = 0xbf56c16c16c13a0b;
when 12 result = 0x3efa01a019b1e8d8;
when 13 result = 0xbe927e4f7282f468;
when 14 result = 0x3e21ee96d2641b13;
when 15 result = 0xbda8f76380fbb401;
return result<N-1:0>;FPTrigSMul(bits(N) op1, bits(N) op2,FPCRType fpcr)
assert N IN {16,32,64};
result = FPMul(op1, op1, fpcr);
(fptype, sign, value) = FPUnpack(result, fpcr);
if (fptype != FPType_QNaN) && (fptype != FPType_SNaN) then
result<N-1> = op2<0>;
FPProcessDenorm(fptype, N, fpcr);
return result;
// FPTrigSMul()
// FPTrigSSel()
// ============
bits(N) FPTrigSMul(bits(N) op1, bits(N) op2,FPTrigSSel(bits(N) op1, bits(N) op2)
assert N IN {16,32,64};
bits(N) result;
if op2<0> == '1' then
result = FPCRTypeFPOne fpcr)
assert N IN {16,32,64};
result = FPMul(op1, op1, fpcr);
fpexc = FALSE;
(fptype, sign, value) = FPUnpack(result, fpcr, fpexc);
if !(fptype IN {FPType_QNaN, FPType_SNaN}) then
result<N-1> = op2<0>;
(op2<1>);
else
result = op1;
result<N-1> = result<N-1> EOR op2<1>;
return result;
// FPTrigSSel()
// ============
// FirstActive()
// =============
bits(N)bit FPTrigSSel(bits(N) op1, bits(N) op2)
assert N IN {16,32,64};
bits(N) result;
if op2<0> == '1' then
result =FirstActive(bits(N) mask, bits(N) x, integer esize)
integer elements = N DIV (esize DIV 8);
for e = 0 to elements-1
if FPOneElemP(op2<1>);
elsif op2<1> == '1' then
result =[mask, e, esize] == '1' then return FPNegElemP(op1);
else
result = op1;
return result;[x, e, esize];
return '0';
// FirstActive()
// =============
// FloorPow2()
// ===========
// For a positive integer X, return the largest power of 2 <= X
bitinteger FirstActive(bits(N) mask, bits(N) x, integer esize)
integer elements = N DIV (esize DIV 8);
for e = 0 to elements-1
ifFloorPow2(integer x)
assert x >= 0;
integer n = 1;
if x == 0 then return 0;
while x >= 2^n do
n = n + 1;
return 2^(n - 1); ElemP[mask, e, esize] == '1' then return ElemP[x, e, esize];
return '0';
// FloorPow2()
// ===========
// For a positive integer X, return the largest power of 2 <= X
// HaveSVE()
// =========
integerboolean FloorPow2(integer x)
assert x >= 0;
integer n = 1;
if x == 0 then return 0;
while x >= 2^n do
n = n + 1;
return 2^(n - 1);HaveSVE()
returnHasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Have SVE ISA";
// HaveSVE()
// =========
// HaveSVEFP32MatMulExt()
// ======================
// Returns TRUE if single-precision floating-point matrix multiply instruction support implemented and FALSE otherwise.
boolean HaveSVE()
HaveSVEFP32MatMulExt()
return HasArchVersionHaveSVE(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Have SVE ISA";() && boolean IMPLEMENTATION_DEFINED "Have SVE FP32 Matrix Multiply extension";
// HaveSVEFP32MatMulExt()
// HaveSVEFP64MatMulExt()
// ======================
// Returns TRUE if single-precision floating-point matrix multiply instruction support implemented and FALSE otherwise.
// Returns TRUE if double-precision floating-point matrix multiply instruction support implemented and FALSE otherwise.
boolean HaveSVEFP32MatMulExt()
HaveSVEFP64MatMulExt()
return HaveSVE() && boolean IMPLEMENTATION_DEFINED "Have SVE FP32 Matrix Multiply extension";() && boolean IMPLEMENTATION_DEFINED "Have SVE FP64 Matrix Multiply extension";
// HaveSVEFP64MatMulExt()
// ======================
// Returns TRUE if double-precision floating-point matrix multiply instruction support implemented and FALSE otherwise.
// ImplementedSVEVectorLength()
// ============================
// Reduce SVE vector length to a supported value (e.g. power of two)
booleaninteger HaveSVEFP64MatMulExt()
returnImplementedSVEVectorLength(integer nbits)
return integer IMPLEMENTATION_DEFINED; HaveSVE() && boolean IMPLEMENTATION_DEFINED "Have SVE FP64 Matrix Multiply extension";
// ImplementedSVEVectorLength()
// ============================
// Reduce SVE vector length to a supported value (e.g. power of two)
// IsEven()
// ========
integerboolean ImplementedSVEVectorLength(integer nbits)
return integer IMPLEMENTATION_DEFINED;IsEven(integer val)
return val MOD 2 == 0;
// IsEven()
// ========
// IsFPEnabled()
// =============
// Returns TRUE if accesses to the Advanced SIMD and floating-point
// registers are enabled at the target exception level in the current
// execution state and FALSE otherwise.
boolean IsEven(integer val)
return val MOD 2 == 0;IsFPEnabled(bits(2) el)
ifELUsingAArch32(el) then
return AArch32.IsFPEnabled(el);
else
return AArch64.IsFPEnabled(el);
// IsFPEnabled()
// =============
// Returns TRUE if accesses to the Advanced SIMD and floating-point
// registers are enabled at the target exception level in the current
// execution state and FALSE otherwise.
// IsSVEEnabled()
// ==============
// Returns TRUE if access to SVE instructions and System registers is
// enabled at the target exception level and FALSE otherwise.
boolean IsFPEnabled(bits(2) el)
IsSVEEnabled(bits(2) el)
if ELUsingAArch32(el) then
return return FALSE;
// Check if access disabled in CPACR_EL1
if el IN { AArch32.IsFPEnabledEL0(el);
else
return, } && !IsInHost() then
// Check SVE at EL0/EL1
case CPACR_EL1.ZEN of
when 'x0' disabled = TRUE;
when '01' disabled = el == EL0;
when '11' disabled = FALSE;
if disabled then return FALSE;
// Check if access disabled in CPTR_EL2
if el IN {EL0, EL1, EL2} && EL2Enabled() then
if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
case CPTR_EL2.ZEN of
when 'x0' disabled = TRUE;
when '01' disabled = el == EL0 && HCR_EL2.TGE == '1';
when '11' disabled = FALSE;
if disabled then return FALSE;
else
if CPTR_EL2.TZ == '1' then return FALSE;
// Check if access disabled in CPTR_EL3
if HaveEL(EL3AArch64.IsFPEnabledEL1(el);) then
if CPTR_EL3.EZ == '0' then return FALSE;
return TRUE;
// IsSVEEnabled()
// ==============
// Returns TRUE if access to SVE instructions and System registers is
// enabled at the target exception level and FALSE otherwise.

boolean IsSVEEnabled(bits(2) el)
    if ELUsingAArch32(el) then
        return FALSE;

    // Check if access disabled in CPACR_EL1
    if el IN {EL0, EL1} && !IsInHost() then
        // Check SVE at EL0/EL1
        case CPACR_EL1.ZEN of
            when 'x0' disabled = TRUE;
            when '01' disabled = el == EL0;
            when '11' disabled = FALSE;
        if disabled then return FALSE;

    // Check if access disabled in CPTR_EL2
    if el IN {EL0, EL1, EL2} && EL2Enabled() then
        if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
            case CPTR_EL2.ZEN of
                when 'x0' disabled = TRUE;
                when '01' disabled = el == EL0 && HCR_EL2.TGE == '1';
                when '11' disabled = FALSE;
            if disabled then return FALSE;
        else
            if CPTR_EL2.TZ == '1' then return FALSE;

    // Check if access disabled in CPTR_EL3
    if HaveEL(EL3) then
        if CPTR_EL3.EZ == '0' then return FALSE;

    return TRUE;

// LastActive()
// ============
// Returns the element of x corresponding to the highest-numbered active
// element of mask, or '0' if mask has no active element.

bit LastActive(bits(N) mask, bits(N) x, integer esize)
    integer elements = N DIV (esize DIV 8);
    for e = elements-1 downto 0
        if ElemP[mask, e, esize] == '1' then return ElemP[x, e, esize];
    return '0';

// LastActiveElement()
// ===================
// Returns the index of the highest-numbered active element of mask,
// or -1 if no element is active.

integer LastActiveElement(bits(N) mask, integer esize)
    assert esize IN {8, 16, 32, 64};
    integer elements = VL DIV esize;
    for e = elements-1 downto 0
        if ElemP[mask, e, esize] == '1' then return e;
    return -1;

// Maximum architectural predicate register length, in bits.
constant integer MAX_PL = 256;
// Maximum architectural vector register length, in bits.
constant integer MAX_VL = 2048;

// MaybeZeroSVEUppers()
// ====================
// On taking an exception to target_el, it is CONSTRAINED UNPREDICTABLE
// (Unpredictable_SVEZEROUPPER) whether the parts of the SVE registers not
// accessible below target_el are zeroed.

MaybeZeroSVEUppers(bits(2) target_el)
    boolean lower_enabled;

    if UInt(target_el) <= UInt(PSTATE.EL) || !IsSVEEnabled(target_el) then
        return;

    if target_el == EL3 then
        if EL2Enabled() then
            lower_enabled = IsFPEnabled(EL2);
        else
            lower_enabled = IsFPEnabled(EL1);
    elsif target_el == EL2 then
        assert !ELUsingAArch32(EL2);
        if HCR_EL2.TGE == '0' then
            lower_enabled = IsFPEnabled(EL1);
        else
            lower_enabled = IsFPEnabled(EL0);
    else
        assert target_el == EL1 && !ELUsingAArch32(EL1);
        lower_enabled = IsFPEnabled(EL0);

    if lower_enabled then
        integer vl = if IsSVEEnabled(PSTATE.EL) then VL else 128;
        integer pl = vl DIV 8;
        for n = 0 to 31
            if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
                _Z[n] = ZeroExtend(_Z[n]<vl-1:0>);
        for n = 0 to 15
            if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
                _P[n] = ZeroExtend(_P[n]<pl-1:0>);
        if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
            _FFR = ZeroExtend(_FFR<pl-1:0>);
// MemNF[] - non-assignment form
// =============================
// Non-faulting memory read of 'size' bytes for SVE first-fault/non-fault
// loads. Returns the value read and a boolean that is TRUE if the access
// failed (in which case the value is UNKNOWN).

(bits(8*size), boolean) MemNF[bits(64) address, integer size, AccType acctype]
    assert size IN {1, 2, 4, 8, 16};
    bits(8*size) value;

    aligned = (address == Align(address, size));
    A = SCTLR[].A;

    if !aligned && (A == '1') then
        return (bits(8*size) UNKNOWN, TRUE);

    atomic = aligned || size == 1;

    if !atomic then
        (value<7:0>, bad) = MemSingleNF[address, 1, acctype, aligned];
        if bad then
            return (bits(8*size) UNKNOWN, TRUE);

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        if !aligned then
            c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
            assert c IN {Constraint_FAULT, Constraint_NONE};
            if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            (value<8*i+7:8*i>, bad) = MemSingleNF[address+i, 1, acctype, aligned];
            if bad then
                return (bits(8*size) UNKNOWN, TRUE);
    else
        (value, bad) = MemSingleNF[address, size, acctype, aligned];
        if bad then
            return (bits(8*size) UNKNOWN, TRUE);

    if BigEndian(acctype) then
        value = BigEndianReverse(value);
    return (value, FALSE);
// MemSingleNF[] - non-assignment form
// ===================================
// Non-faulting read of a single 'size'-byte memory access. Returns the value
// read and a boolean that is TRUE if the access was suppressed or would have
// faulted (in which case the value is UNKNOWN).

(bits(8*size), boolean) MemSingleNF[bits(64) address, integer size, AccType acctype, boolean wasaligned]
    bits(8*size) value;
    boolean iswrite = FALSE;
    AddressDescriptor memaddrdesc;

    // Implementation may suppress NF load for any reason
    if ConstrainUnpredictableBool(Unpredictable_NONFAULT) then
        return (bits(8*size) UNKNOWN, TRUE);

    // MMU or MPU
    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, size);

    // Non-fault load from Device memory must not be performed externally
    if memaddrdesc.memattrs.memtype == MemType_Device then
        return (bits(8*size) UNKNOWN, TRUE);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        return (bits(8*size) UNKNOWN, TRUE);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    if HaveMTE2Ext() then
        if AArch64.AccessIsTagChecked(address, acctype) then
            bits(4) ptag = AArch64.PhysicalTag(address);
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                return (bits(8*size) UNKNOWN, TRUE);
    value = _Mem[memaddrdesc, size, accdesc, iswrite];

    return (value, FALSE);
// NoneActive()
// ============
// Returns '1' if no element of x governed by mask is active, '0' otherwise.

bit NoneActive(bits(N) mask, bits(N) x, integer esize)
    integer elements = N DIV (esize DIV 8);
    for e = 0 to elements-1
        if ElemP[mask, e, esize] == '1' && ElemP[x, e, esize] == '1' then return '0';
    return '1';
// P[] - non-assignment form
// =========================
// Read predicate register _P[n] at the current predicate length PL.

bits(width) P[integer n]
    assert n >= 0 && n <= 31;
    assert width == PL;
    return _P[n]<width-1:0>;

// P[] - assignment form
// =====================
// Write predicate register _P[n]; it is CONSTRAINED UNPREDICTABLE whether
// bits above PL are zeroed or left unchanged.

P[integer n] = bits(width) value
    assert n >= 0 && n <= 31;
    assert width == PL;
    if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
        _P[n] = ZeroExtend(value);
    else
        _P[n]<width-1:0> = value;
// PL - non-assignment form
// ========================
// Current predicate register length in bits (one-eighth of the vector length).

integer PL
    return VL DIV 8;
// PredTest()
// ==========
// Compute the NZCV condition flags for a predicate-generating operation.

bits(4) PredTest(bits(N) mask, bits(N) result, integer esize)
    bit n = FirstActive(mask, result, esize);
    bit z = NoneActive(mask, result, esize);
    bit c = NOT LastActive(mask, result, esize);
    bit v = '0';
    return n:z:c:v;
// ReducePredicated()
// ==================
// Perform the reduction 'op' over the active elements of 'input', substituting
// 'identity' for inactive elements and for padding up to a power-of-two width.

bits(esize) ReducePredicated(ReduceOp op, bits(N) input, bits(M) mask, bits(esize) identity)
    assert(N == M * 8);
    integer p2bits = CeilPow2(N);
    bits(p2bits) operand;
    integer elements = p2bits DIV esize;
    for e = 0 to elements-1
        if e * esize < N && ElemP[mask, e, esize] == '1' then
            Elem[operand, e, esize] = Elem[input, e, esize];
        else
            Elem[operand, e, esize] = identity;
    return Reduce(op, operand, esize);
// Reverse()
// =========
// Reverse subwords of M bits in an N-bit word

bits(N) Reverse(bits(N) word, integer M)
    bits(N) result;
    integer sw = N DIV M;
    assert N == sw * M;
    for s = 0 to sw-1
        Elem[result, sw - 1 - s, M] = Elem[word, s, M];
    return result;
// SVEAccessTrap()
// ===============
// Trapped access to SVE registers due to CPACR_EL1, CPTR_EL2, or CPTR_EL3.

SVEAccessTrap(bits(2) target_el)
    assert UInt(target_el) >= UInt(PSTATE.EL) && target_el != EL0 && HaveEL(target_el);

    route_to_el2 = target_el == EL1 && EL2Enabled() && HCR_EL2.TGE == '1';
    exception = ExceptionSyndrome(Exception_SVEAccessTrap);
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    if route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);
// SVE comparison operation encodings.
enumeration SVECmp { Cmp_EQ, Cmp_NE, Cmp_GE, Cmp_GT, Cmp_LT, Cmp_LE, Cmp_UN };
// SVEMoveMaskPreferred()
// ======================
// Return FALSE if a bitmask immediate encoding would generate an immediate
// value that could also be represented by a single DUP instruction.
// Used as a condition for the preferred MOV<-DUPM alias.

boolean SVEMoveMaskPreferred(bits(13) imm13)
    bits(64) imm;
    (imm, -) = DecodeBitMasks(imm13<12>, imm13<5:0>, imm13<11:6>, TRUE);

    // Check for 8 bit immediates
    if !IsZero(imm<7:0>) then
        // Check for 'ffffffffffffffxy' or '00000000000000xy'
        if IsZero(imm<63:7>) || IsOnes(imm<63:7>) then
            return FALSE;
        // Check for 'ffffffxyffffffxy' or '000000xy000000xy'
        if imm<63:32> == imm<31:0> && (IsZero(imm<31:7>) || IsOnes(imm<31:7>)) then
            return FALSE;
        // Check for 'ffxyffxyffxyffxy' or '00xy00xy00xy00xy'
        if imm<63:32> == imm<31:0> && imm<31:16> == imm<15:0> && (IsZero(imm<15:7>) || IsOnes(imm<15:7>)) then
            return FALSE;
        // Check for 'xyxyxyxyxyxyxyxy'
        if imm<63:32> == imm<31:0> && imm<31:16> == imm<15:0> && (imm<15:8> == imm<7:0>) then
            return FALSE;
    // Check for 16 bit immediates
    else
        // Check for 'ffffffffffffxy00' or '000000000000xy00'
        if IsZero(imm<63:15>) || IsOnes(imm<63:15>) then
            return FALSE;
        // Check for 'ffffxy00ffffxy00' or '0000xy000000xy00'
        if imm<63:32> == imm<31:0> && (IsZero(imm<31:7>) || IsOnes(imm<31:7>)) then
            return FALSE;
        // Check for 'xy00xy00xy00xy00'
        if imm<63:32> == imm<31:0> && imm<31:16> == imm<15:0> then
            return FALSE;
    return TRUE;
// SVE architectural register state.
array bits(MAX_VL) _Z[0..31];   // vector registers Z0-Z31
array bits(MAX_PL) _P[0..15];   // predicate registers P0-P15
bits(MAX_PL) _FFR;              // first-fault register
// VL - non-assignment form
// ========================
// Current SVE vector length in bits, derived from the ZCR_ELx.LEN fields
// applicable to the current Exception level.

integer VL
    integer vl;

    if PSTATE.EL == EL1 || (PSTATE.EL == EL0 && !IsInHost()) then
        vl = UInt(ZCR_EL1.LEN);

    if PSTATE.EL == EL2 || (PSTATE.EL == EL0 && IsInHost()) then
        vl = UInt(ZCR_EL2.LEN);
    elsif PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
        vl = Min(vl, UInt(ZCR_EL2.LEN));

    if PSTATE.EL == EL3 then
        vl = UInt(ZCR_EL3.LEN);
    elsif HaveEL(EL3) then
        vl = Min(vl, UInt(ZCR_EL3.LEN));

    vl = (vl + 1) * 128;
    vl = ImplementedSVEVectorLength(vl);

    return vl;
// Z[] - non-assignment form
// =========================
// Read vector register _Z[n] at the current vector length VL.

bits(width) Z[integer n]
    assert n >= 0 && n <= 31;
    assert width == VL;
    return _Z[n]<width-1:0>;

// Z[] - assignment form
// =====================
// Write vector register _Z[n]; it is CONSTRAINED UNPREDICTABLE whether
// bits above VL are zeroed or left unchanged.

Z[integer n] = bits(width) value
    assert n >= 0 && n <= 31;
    assert width == VL;
    if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
        _Z[n] = ZeroExtend(value);
    else
        _Z[n]<width-1:0> = value;
// CNTKCTL[] - non-assignment form
// ===============================
// Counter-timer kernel control register for the current regime:
// CNTHCTL_EL2 when running as a VHE host, otherwise CNTKCTL_EL1.

CNTKCTLType CNTKCTL[]
    bits(64) r;
    if IsInHost() then
        r = CNTHCTL_EL2;
        return r;
    r = CNTKCTL_EL1;
    return r;
type CNTKCTLType;

// CPACR[] - non-assignment form
// =============================
// Architectural feature access control register for the current regime:
// CPTR_EL2 when running as a VHE host, otherwise CPACR_EL1.

CPACRType CPACR[]
    bits(64) r;
    if IsInHost() then
        r = CPTR_EL2;
        return r;
    r = CPACR_EL1;
    return r;
type CPACRType;

// ELR[] - non-assignment form
// ===========================
// Read the Exception Link Register of Exception level 'el'.

bits(64) ELR[bits(2) el]
    bits(64) r;
    case el of
        when EL1  r = ELR_EL1;
        when EL2  r = ELR_EL2;
        when EL3  r = ELR_EL3;
        otherwise Unreachable();
    return r;
// ELR[] - non-assignment form
// ===========================
// Read the Exception Link Register of the current Exception level.
bits(64) ELR[]
assert PSTATE.EL != EL0;   // there is no ELR_EL0
return ELR[PSTATE.EL];
// ELR[] - assignment form
// =======================
// Write the Exception Link Register of Exception level 'el'.
ELR[bits(2) el] = bits(64) value
bits(64) r = value;
case el of
when EL1 ELR_EL1 = r;
when EL2 ELR_EL2 = r;
when EL3 ELR_EL3 = r;
otherwise Unreachable();
return;
// ELR[] - assignment form
// =======================
// Write the Exception Link Register of the current Exception level.
ELR[] = bits(64) value
assert PSTATE.EL != EL0;   // there is no ELR_EL0
ELR[PSTATE.EL] = value;
return;
// ESR[] - non-assignment form
// ===========================
// Read the Exception Syndrome Register of translation regime 'regime'.

ESRType ESR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1  r = ESR_EL1;
        when EL2  r = ESR_EL2;
        when EL3  r = ESR_EL3;
        otherwise Unreachable();
    return r;

// ESR[] - non-assignment form
// ===========================

ESRType ESR[]
    return ESR[S1TranslationRegime()];
// ESR[] - assignment form
// =======================
// Write the Exception Syndrome Register of translation regime 'regime'.

ESR[bits(2) regime] = ESRType value
    bits(64) r = value;
    case regime of
        when EL1  ESR_EL1 = r;
        when EL2  ESR_EL2 = r;
        when EL3  ESR_EL3 = r;
        otherwise Unreachable();
    return;

// ESR[] - assignment form
// =======================

ESR[] = ESRType value
    ESR[S1TranslationRegime()] = value;
type ESRType;

// FAR[] - non-assignment form
// ===========================
// Read the Fault Address Register of translation regime 'regime'.

bits(64) FAR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1  r = FAR_EL1;
        when EL2  r = FAR_EL2;
        when EL3  r = FAR_EL3;
        otherwise Unreachable();
    return r;
// FAR[] - non-assignment form
// ===========================
// Read the Fault Address Register of the current stage 1 translation regime.
bits(64) FAR[]
return FAR[S1TranslationRegime()];
// FAR[] - assignment form
// =======================
// Write the Fault Address Register of translation regime 'regime'.
FAR[bits(2) regime] = bits(64) value
bits(64) r = value;
case regime of
when EL1 FAR_EL1 = r;
when EL2 FAR_EL2 = r;
when EL3 FAR_EL3 = r;
otherwise Unreachable();
return;
// FAR[] - assignment form
// =======================
// Write the Fault Address Register of the current stage 1 translation regime.
FAR[] = bits(64) value
FAR[S1TranslationRegime()] = value;
return;
// MAIR[] - non-assignment form
// ============================
// Read the Memory Attribute Indirection Register of translation regime 'regime'.

MAIRType MAIR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1  r = MAIR_EL1;
        when EL2  r = MAIR_EL2;
        when EL3  r = MAIR_EL3;
        otherwise Unreachable();
    return r;

// MAIR[] - non-assignment form
// ============================

MAIRType MAIR[]
    return MAIR[S1TranslationRegime()];
type MAIRType;

// SCTLR[] - non-assignment form
// =============================
// Read the System Control Register of translation regime 'regime'.

SCTLRType SCTLR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1  r = SCTLR_EL1;
        when EL2  r = SCTLR_EL2;
        when EL3  r = SCTLR_EL3;
        otherwise Unreachable();
    return r;
// SCTLR[] - non-assignment form
// =============================
// Read the System Control Register of the current stage 1 translation regime.
SCTLRType SCTLR[]
return SCTLR[S1TranslationRegime()];
type SCTLRType;

// VBAR[] - non-assignment form
// ============================
// Read the Vector Base Address Register of translation regime 'regime'.

bits(64) VBAR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1  r = VBAR_EL1;
        when EL2  r = VBAR_EL2;
        when EL3  r = VBAR_EL3;
        otherwise Unreachable();
    return r;
// VBAR[] - non-assignment form
// ============================
// Read the Vector Base Address Register of the current stage 1 translation regime.
bits(64) VBAR[]
return VBAR[S1TranslationRegime()];
// AArch64.AllocationTagAccessIsEnabled()
// ======================================
// Check whether access to Allocation Tags is enabled.

boolean AArch64.AllocationTagAccessIsEnabled(AccType acctype)
    bits(2) el = AArch64.AccessUsesEL(acctype);

    if SCR_EL3.ATA == '0' && el IN {EL0, EL1, EL2} then
        return FALSE;
    elsif HCR_EL2.ATA == '0' && el IN {EL0, EL1} && EL2Enabled() && HCR_EL2.<E2H,TGE> != '11' then
        return FALSE;
    elsif SCTLR_EL3.ATA == '0' && el == EL3 then
        return FALSE;
    elsif SCTLR_EL2.ATA == '0' && el == EL2 then
        return FALSE;
    elsif SCTLR_EL1.ATA == '0' && el == EL1 then
        return FALSE;
    elsif SCTLR_EL2.ATA0 == '0' && el == EL0 && EL2Enabled() && HCR_EL2.<E2H,TGE> == '11' then
        return FALSE;
    elsif SCTLR_EL1.ATA0 == '0' && el == EL0 && !(EL2Enabled() && HCR_EL2.<E2H,TGE> == '11') then
        return FALSE;
    else
        return TRUE;
// AArch64.CheckSystemAccess()
// ===========================
// Checks on a System register or System instruction access; in this version
// of the architecture model no additional checks are performed here.

AArch64.CheckSystemAccess(bits(2) op0, bits(3) op1, bits(4) crn,
                          bits(4) crm, bits(3) op2, bits(5) rt, bit read)
    return;
// AArch64.ChooseNonExcludedTag()
// ==============================
// Return a tag derived from the start and the offset values, excluding
// any tags in the given mask.

bits(4) AArch64.ChooseNonExcludedTag(bits(4) tag, bits(4) offset, bits(16) exclude)
    // If every tag value is excluded, the result is defined to be '0000'.
    if IsOnes(exclude) then
        return '0000';

    // With a zero offset, advance to the first non-excluded tag.
    if offset == '0000' then
        while exclude<UInt(tag)> == '1' do
            tag = tag + '0001';

    // Otherwise step 'offset' times, skipping excluded tag values.
    while offset != '0000' do
        offset = offset - '0001';
        tag = tag + '0001';
        while exclude<UInt(tag)> == '1' do
            tag = tag + '0001';

    return tag;
// AArch64.ExecutingATS1xPInstr()
// ==============================
// Return TRUE if current instruction is AT S1E1R/WP

boolean AArch64.ExecutingATS1xPInstr()
    if !HavePrivATExt() then return FALSE;

    instr = ThisInstr();
    if instr<22+:10> == '1101010100' then
        op1 = instr<16+:3>;
        CRn = instr<12+:4>;
        CRm = instr<8+:4>;
        op2 = instr<5+:3>;
        return op1 == '000' && CRn == '0111' && CRm == '1001' && op2 IN {'000','001'};
    else
        return FALSE;
// AArch64.ExecutingBROrBLROrRetInstr()
// ====================================
// Returns TRUE if current instruction is a BR, BLR, RET, B[L]RA[B][Z], or RETA[B].

boolean AArch64.ExecutingBROrBLROrRetInstr()
    if !HaveBTIExt() then return FALSE;

    instr = ThisInstr();
    if instr<31:25> == '1101011' && instr<20:16> == '11111' then
        opc = instr<24:21>;
        return opc != '0101';
    else
        return FALSE;
// AArch64.ExecutingBTIInstr()
// ===========================
// Returns TRUE if current instruction is a BTI.

boolean AArch64.ExecutingBTIInstr()
    if !HaveBTIExt() then return FALSE;

    instr = ThisInstr();
    if instr<31:22> == '1101010100' && instr<21:12> == '0000110010' && instr<4:0> == '11111' then
        CRm = instr<11:8>;
        op2 = instr<7:5>;
        return (CRm == '0100' && op2<0> == '0');
    else
        return FALSE;
// AArch64.ExecutingERETInstr()
// ============================
// Returns TRUE if current instruction is ERET.

boolean AArch64.ExecutingERETInstr()
    instr = ThisInstr();
    return instr<31:12> == '11010110100111110000';
// AArch64.NextRandomTagBit()
// ==========================
// Generate a random bit suitable for generating a random Allocation Tag.
// Advances the LFSR state held in RGSR_EL1.SEED.

bit AArch64.NextRandomTagBit()
    bits(16) lfsr = RGSR_EL1.SEED;
    bit top = lfsr<5> EOR lfsr<3> EOR lfsr<2> EOR lfsr<0>;
    RGSR_EL1.SEED = top:lfsr<15:1>;
    return top;
// AArch64.RandomTag()
// ===================
// Generate a random Allocation Tag.

bits(4) AArch64.RandomTag()
    bits(4) tag;
    for i = 0 to 3
        tag<i> = AArch64.NextRandomTagBit();
    return tag;

// Execute a system instruction with write (source operand).
AArch64.SysInstr(integer op0, integer op1, integer crn, integer crm, integer op2, bits(64) val);
// Execute a system instruction with read (result operand).
// Returns the result of the instruction.
bits(64) AArch64.SysInstrWithResult(integer op0, integer op1, integer crn, integer crm, integer op2);

// Read from a system register and return the contents of the register.
bits(64) AArch64.SysRegRead(integer op0, integer op1, integer crn, integer crm, integer op2);

// Write to a system register.
AArch64.SysRegWrite(integer op0, integer op1, integer crn, integer crm, integer op2, bits(64) val);

// Global state recording whether the current instruction is BTI-compatible.
boolean BTypeCompatible;
// BTypeCompatible_BTI
// ===================
// This function determines whether a given hint encoding is compatible with the current value of
// PSTATE.BTYPE. A value of TRUE here indicates a valid Branch Target Identification instruction.

boolean BTypeCompatible_BTI(bits(2) hintcode)
    case hintcode of
        when '00'
            return FALSE;
        when '01'
            return PSTATE.BTYPE != '11';
        when '10'
            return PSTATE.BTYPE != '10';
        when '11'
            return TRUE;
// BTypeCompatible_PACIXSP()
// =========================
// Returns TRUE if PACIASP, PACIBSP instruction is implicit compatible with PSTATE.BTYPE,
// FALSE otherwise.

boolean BTypeCompatible_PACIXSP()
    if PSTATE.BTYPE IN {'01', '10'} then
        return TRUE;
    elsif PSTATE.BTYPE == '11' then
        // SCTLR_ELx.BT0 (bit 35) applies at EL0, SCTLR_ELx.BT1 (bit 36) otherwise.
        index = if PSTATE.EL == EL0 then 35 else 36;
        return SCTLR[]<index> == '0';
    else
        return FALSE;
// Global state holding the branch type for the next instruction, used by BTI.
bits(2) BTypeNext;
// The ChooseRandomNonExcludedTag function is used when GCR_EL1.RRND == '1' to generate random
// Allocation Tags.
//
// The resulting Allocation Tag is selected from the set [0,15], excluding any Allocation Tag where
// exclude[tag_value] == 1. If 'exclude' is all Ones, the returned Allocation Tag is '0000'.
//
// This function is permitted to generate a non-deterministic selection from the set of non-excluded
// Allocation Tags. A reasonable implementation is described by the Pseudocode used when
// GCR_EL1.RRND is 0, but with a non-deterministic implementation of NextRandomTagBit(). Implementations
// may choose to behave the same as GCR_EL1.RRND=0.
bits(4) ChooseRandomNonExcludedTag(bits(16) exclude);
boolean InGuardedPage;// The ChooseRandomNonExcludedTag function is used when GCR_EL1.RRND == '1' to generate random
// Allocation Tags.
//
// The resulting Allocation Tag is selected from the set [0,15], excluding any Allocation Tag where
// exclude[tag_value] == 1. If 'exclude' is all Ones, the returned Allocation Tag is '0000'.
//
// This function is permitted to generate a non-deterministic selection from the set of non-excluded
// Allocation Tags. A reasonable implementation is described by the Pseudocode used when
// GCR_EL1.RRND is 0, but with a non-deterministic implementation of NextRandomTagBit(). Implementations
// may choose to behave the same as GCR_EL1.RRND=0.
bits(4)ChooseRandomNonExcludedTag(bits(16) exclude);
// Global flag recording whether the current memory access is being made from a
// guarded page (set by SetInGuardedPage during instruction fetch).
boolean InGuardedPage;

// IsHCRXEL2Enabled()
// ==================
// Returns TRUE if access to HCRX_EL2 register is enabled, and FALSE otherwise.
// Indirect read of HCRX_EL2 returns 0 when access is not enabled.

boolean IsHCRXEL2Enabled()
    assert(HaveFeatHCX());
    // When EL3 is implemented, SCR_EL3.HXEn gates all HCRX_EL2 accesses.
    if HaveEL(EL3) && SCR_EL3.HXEn == '0' then
        return FALSE;
    return EL2Enabled();
// SetBTypeCompatible()
// ====================
// Sets the value of BTypeCompatible global variable used by BTI

SetBTypeCompatible(boolean x)
    BTypeCompatible = x;
// SetBTypeNext()
// ==============
// Set the value of BTypeNext global variable used by BTI

SetBTypeNext(bits(2) x)
    BTypeNext = x;
// SetInGuardedPage()
// ==================
// Global state updated to denote if memory access is from a guarded page.

SetInGuardedPage(boolean guardedpage)
    InGuardedPage = guardedpage;

// AArch64.ExceptionReturn()
// =========================

AArch64.ExceptionReturn(bits(64) new_pc, bits(64) spsr)
    sync_errors = HaveIESB() && SCTLR[].IESB == '1';
    if HaveDoubleFaultExt() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
    if sync_errors then
        SynchronizeErrors();
        iesb_req = TRUE;
        TakeUnmaskedPhysicalSErrorInterrupts(iesb_req);
    SynchronizeContext();

    // Attempts to change to an illegal state will invoke the Illegal Execution state mechanism
    bits(2) source_el = PSTATE.EL;
    SetPSTATEFromPSR(spsr);
    ClearExclusiveLocal(ProcessorID());
    SendEventLocal();

    if PSTATE.IL == '1' && spsr<4> == '1' && spsr<20> == '0' then
        // If the exception return is illegal, PC[63:32,1:0] are UNKNOWN
        new_pc<63:32> = bits(32) UNKNOWN;
        new_pc<1:0> = bits(2) UNKNOWN;
    elsif UsingAArch32() then // Return to AArch32
        // ELR_ELx[1:0] or ELR_ELx[0] are treated as being 0, depending on the target instruction set state
        if PSTATE.T == '1' then
            new_pc<0> = '0';                 // T32
        else
            new_pc<1:0> = '00';              // A32
    else // Return to AArch64
        // ELR_ELx[63:56] might include a tag
        new_pc = AArch64.BranchAddr(new_pc);

    if UsingAArch32() then
        // 32 most significant bits are ignored.
        BranchTo(new_pc<31:0>, BranchType_ERET);
    else
        BranchToAddr(new_pc, BranchType_ERET);
// Count operations for CLZ/CLS/CNT instructions.
enumeration CountOp {CountOp_CLZ, CountOp_CLS, CountOp_CNT};
// DecodeRegExtend()
// =================
// Decode a register extension option

ExtendType DecodeRegExtend(bits(3) op)
    case op of
        when '000' return ExtendType_UXTB;
        when '001' return ExtendType_UXTH;
        when '010' return ExtendType_UXTW;
        when '011' return ExtendType_UXTX;
        when '100' return ExtendType_SXTB;
        when '101' return ExtendType_SXTH;
        when '110' return ExtendType_SXTW;
        when '111' return ExtendType_SXTX;
// ExtendReg()
// ===========
// Perform a register extension and shift

bits(N) ExtendReg(integer reg, ExtendType exttype, integer shift)
    assert shift >= 0 && shift <= 4;
    bits(N) val = X[reg];
    boolean unsigned;
    integer len;

    case exttype of
        when ExtendType_SXTB unsigned = FALSE; len = 8;
        when ExtendType_SXTH unsigned = FALSE; len = 16;
        when ExtendType_SXTW unsigned = FALSE; len = 32;
        when ExtendType_SXTX unsigned = FALSE; len = 64;
        when ExtendType_UXTB unsigned = TRUE;  len = 8;
        when ExtendType_UXTH unsigned = TRUE;  len = 16;
        when ExtendType_UXTW unsigned = TRUE;  len = 32;
        when ExtendType_UXTX unsigned = TRUE;  len = 64;

    // Note the extended width of the intermediate value and
    // that sign extension occurs from bit <len+shift-1>, not
    // from bit <len-1>. This is equivalent to the instruction
    //   [SU]BFIZ Rtmp, Rreg, #shift, #len
    // It may also be seen as a sign/zero extend followed by a shift:
    //   LSL(Extend(val<len-1:0>, N, unsigned), shift);
    len = Min(len, N - shift);
    return Extend(val<len-1:0> : Zeros(shift), N, unsigned);
// Register extension kinds used by extended-register data-processing forms.
enumeration ExtendType {ExtendType_SXTB, ExtendType_SXTH, ExtendType_SXTW, ExtendType_SXTX,
                        ExtendType_UXTB, ExtendType_UXTH, ExtendType_UXTW, ExtendType_UXTX};
// Floating-point min/max operation kinds.
enumeration FPMaxMinOp {FPMaxMinOp_MAX, FPMaxMinOp_MIN, FPMaxMinOp_MAXNUM, FPMaxMinOp_MINNUM};

// Floating-point unary operation kinds.
enumeration FPUnaryOp {FPUnaryOp_ABS, FPUnaryOp_MOV, FPUnaryOp_NEG, FPUnaryOp_SQRT};

// Floating-point conversion/move operation kinds.
enumeration FPConvOp {FPConvOp_CVT_FtoI, FPConvOp_CVT_ItoF,
                      FPConvOp_MOV_FtoI, FPConvOp_MOV_ItoF,
                      FPConvOp_CVT_FtoI_JS};
// BFXPreferred()
// ==============
//
// Return TRUE if UBFX or SBFX is the preferred disassembly of a
// UBFM or SBFM bitfield instruction. Must exclude more specific
// aliases UBFIZ, SBFIZ, UXT[BH], SXT[BHW], LSL, LSR and ASR.

boolean BFXPreferred(bit sf, bit uns, bits(6) imms, bits(6) immr)
    integer S = UInt(imms);
    integer R = UInt(immr);

    // must not match UBFIZ/SBFIX alias
    if UInt(imms) < UInt(immr) then
        return FALSE;

    // must not match LSR/ASR/LSL alias (imms == 31 or 63)
    if imms == sf:'11111' then
        return FALSE;

    // must not match UXTx/SXTx alias
    if immr == '000000' then
        // must not match 32-bit UXT[BH] or SXT[BH]
        if sf == '0' && imms IN {'000111', '001111'} then
            return FALSE;
        // must not match 64-bit SXT[BHW]
        if sf:uns == '10' && imms IN {'000111', '001111', '011111'} then
            return FALSE;

    // must be UBFX/SBFX alias
    return TRUE;
// DecodeBitMasks()
// ================
// Decode AArch64 bitfield and logical immediate masks which use a similar encoding structure

(bits(M), bits(M)) DecodeBitMasks(bit immN, bits(6) imms, bits(6) immr, boolean immediate)
    bits(64) tmask, wmask;
    bits(6) tmask_and, wmask_and;
    bits(6) tmask_or, wmask_or;
    bits(6) levels;

    // Compute log2 of element size
    // 2^len must be in range [2, M]
    len = HighestSetBit(immN:NOT(imms));
    if len < 1 then UNDEFINED;
    assert M >= (1 << len);

    // Determine S, R and S - R parameters
    levels = ZeroExtend(Ones(len), 6);

    // For logical immediates an all-ones value of S is reserved
    // since it would generate a useless all-ones result (many times)
    if immediate && (imms AND levels) == levels then
        UNDEFINED;

    S = UInt(imms AND levels);
    R = UInt(immr AND levels);
    diff = S - R;    // 6-bit subtract with borrow

    // From a software perspective, the remaining code is equivalant to:
    //   esize = 1 << len;
    //   d = UInt(diff<len-1:0>);
    //   welem = ZeroExtend(Ones(S + 1), esize);
    //   telem = ZeroExtend(Ones(d + 1), esize);
    //   wmask = Replicate(ROR(welem, R));
    //   tmask = Replicate(telem);
    //   return (wmask, tmask);

    // Compute "top mask"
    tmask_and = diff<5:0> OR NOT(levels);
    tmask_or = diff<5:0> AND levels;

    tmask = Ones(64);
    tmask = ((tmask
              AND Replicate(Replicate(tmask_and<0>, 1) : Ones(1), 32))
             OR Replicate(Zeros(1) : Replicate(tmask_or<0>, 1), 32));
    // optimization of first step:
    // tmask = Replicate(tmask_and<0> : '1', 32);
    tmask = ((tmask
              AND Replicate(Replicate(tmask_and<1>, 2) : Ones(2), 16))
             OR Replicate(Zeros(2) : Replicate(tmask_or<1>, 2), 16));
    tmask = ((tmask
              AND Replicate(Replicate(tmask_and<2>, 4) : Ones(4), 8))
             OR Replicate(Zeros(4) : Replicate(tmask_or<2>, 4), 8));
    tmask = ((tmask
              AND Replicate(Replicate(tmask_and<3>, 8) : Ones(8), 4))
             OR Replicate(Zeros(8) : Replicate(tmask_or<3>, 8), 4));
    tmask = ((tmask
              AND Replicate(Replicate(tmask_and<4>, 16) : Ones(16), 2))
             OR Replicate(Zeros(16) : Replicate(tmask_or<4>, 16), 2));
    tmask = ((tmask
              AND Replicate(Replicate(tmask_and<5>, 32) : Ones(32), 1))
             OR Replicate(Zeros(32) : Replicate(tmask_or<5>, 32), 1));

    // Compute "wraparound mask"
    wmask_and = immr OR NOT(levels);
    wmask_or = immr AND levels;

    wmask = Zeros(64);
    wmask = ((wmask
              AND Replicate(Ones(1) : Replicate(wmask_and<0>, 1), 32))
             OR Replicate(Replicate(wmask_or<0>, 1) : Zeros(1), 32));
    // optimization of first step:
    // wmask = Replicate(wmask_or<0> : '0', 32);
    wmask = ((wmask
              AND Replicate(Ones(2) : Replicate(wmask_and<1>, 2), 16))
             OR Replicate(Replicate(wmask_or<1>, 2) : Zeros(2), 16));
    wmask = ((wmask
              AND Replicate(Ones(4) : Replicate(wmask_and<2>, 4), 8))
             OR Replicate(Replicate(wmask_or<2>, 4) : Zeros(4), 8));
    wmask = ((wmask
              AND Replicate(Ones(8) : Replicate(wmask_and<3>, 8), 4))
             OR Replicate(Replicate(wmask_or<3>, 8) : Zeros(8), 4));
    wmask = ((wmask
              AND Replicate(Ones(16) : Replicate(wmask_and<4>, 16), 2))
             OR Replicate(Replicate(wmask_or<4>, 16) : Zeros(16), 2));
    wmask = ((wmask
              AND Replicate(Ones(32) : Replicate(wmask_and<5>, 32), 1))
             OR Replicate(Replicate(wmask_or<5>, 32) : Zeros(32), 1));

    if diff<6> != '0' then // borrow from S - R
        wmask = wmask AND tmask;
    else
        wmask = wmask OR tmask;

    return (wmask<M-1:0>, tmask<M-1:0>);
// Move-wide operation kinds (MOVN/MOVZ/MOVK).
enumeration MoveWideOp {MoveWideOp_N, MoveWideOp_Z, MoveWideOp_K};
// MoveWidePreferred()
// ===================
//
// Return TRUE if a bitmask immediate encoding would generate an immediate
// value that could also be represented by a single MOVZ or MOVN instruction.
// Used as a condition for the preferred MOV<-ORR alias.

boolean MoveWidePreferred(bit sf, bit immN, bits(6) imms, bits(6) immr)
    integer S = UInt(imms);
    integer R = UInt(immr);
    integer width = if sf == '1' then 64 else 32;

    // element size must equal total immediate size
    if sf == '1' && immN:imms != '1xxxxxx' then
        return FALSE;
    if sf == '0' && immN:imms != '00xxxxx' then
        return FALSE;

    // for MOVZ must contain no more than 16 ones
    if S < 16 then
        // ones must not span halfword boundary when rotated
        return (-R MOD 16) <= (15 - S);

    // for MOVN must contain no more than 16 zeros
    if S >= width - 15 then
        // zeros must not span halfword boundary when rotated
        return (R MOD 16) <= (S - (width - 15));

    return FALSE;
// DecodeShift()
// =============
// Decode shift encodings

ShiftType DecodeShift(bits(2) op)
    case op of
        when '00' return ShiftType_LSL;
        when '01' return ShiftType_LSR;
        when '10' return ShiftType_ASR;
        when '11' return ShiftType_ROR;
// ShiftReg()
// ==========
// Perform shift of a register operand

bits(N) ShiftReg(integer reg, ShiftType shiftype, integer amount)
    bits(N) result = X[reg];
    case shiftype of
        when ShiftType_LSL result = LSL(result, amount);
        when ShiftType_LSR result = LSR(result, amount);
        when ShiftType_ASR result = ASR(result, amount);
        when ShiftType_ROR result = ROR(result, amount);
    return result;
// Register shift kinds for shifted-register operand forms.
enumeration ShiftType {ShiftType_LSL, ShiftType_LSR, ShiftType_ASR, ShiftType_ROR};

// Logical operation kinds.
enumeration LogicalOp {LogicalOp_AND, LogicalOp_EOR, LogicalOp_ORR};

// Atomic memory operation kinds (LD<op>/ST<op>/SWP family).
enumeration MemAtomicOp {MemAtomicOp_ADD,
                         MemAtomicOp_BIC,
                         MemAtomicOp_EOR,
                         MemAtomicOp_ORR,
                         MemAtomicOp_SMAX,
                         MemAtomicOp_SMIN,
                         MemAtomicOp_UMAX,
                         MemAtomicOp_UMIN,
                         MemAtomicOp_SWP};
// Basic memory access kinds.
enumeration MemOp {MemOp_LOAD, MemOp_STORE, MemOp_PREFETCH};
// Prefetch()
// ==========
// Decode and execute the prefetch hint on ADDRESS specified by PRFOP

Prefetch(bits(64) address, bits(5) prfop)
    PrefetchHint hint;
    integer target;
    boolean stream;

    case prfop<4:3> of
        when '00' hint = Prefetch_READ;  // PLD: prefetch for load
        when '01' hint = Prefetch_EXEC;  // PLI: preload instructions
        when '10' hint = Prefetch_WRITE; // PST: prepare for store
        when '11' return;                // unallocated hint
    target = UInt(prfop<2:1>);           // target cache level
    stream = (prfop<0> != '0');          // streaming (non-temporal)
    Hint_Prefetch(address, hint, target, stream);
    return;

enumeration MemBarrierOp {
    MemBarrierOp_DSB,   // Data Synchronization Barrier
    MemBarrierOp_DMB,   // Data Memory Barrier
    MemBarrierOp_ISB,   // Instruction Synchronization Barrier
    MemBarrierOp_SSBB,  // Speculative Synchronization Barrier to VA
    MemBarrierOp_PSSBB, // Speculative Synchronization Barrier to PA
    MemBarrierOp_SB     // Speculation Barrier
};
// System hint instruction kinds (NOP/YIELD/WFE/... encodings in the HINT space).
enumeration SystemHintOp {
    SystemHintOp_NOP,
    SystemHintOp_YIELD,
    SystemHintOp_WFE,
    SystemHintOp_WFI,
    SystemHintOp_SEV,
    SystemHintOp_SEVL,
    SystemHintOp_DGH,
    SystemHintOp_ESB,
    SystemHintOp_PSB,
    SystemHintOp_TSB,
    SystemHintOp_BTI,
    SystemHintOp_WFET,
    SystemHintOp_WFIT,
    SystemHintOp_CSDB
};
// PSTATE fields writable by the MSR (immediate) instruction.
enumeration PSTATEField {
    PSTATEField_DAIFSet,
    PSTATEField_DAIFClr,
    PSTATEField_PAN,  // Armv8.1
    PSTATEField_UAO,  // Armv8.2
    PSTATEField_DIT,  // Armv8.4
    PSTATEField_SSBS,
    PSTATEField_TCO,  // Armv8.5
    PSTATEField_SP
};
// SysOp()
// =======
// Classify a SYS instruction encoding as AT, DC, IC, TLBI or a generic SYS operation.

SystemOp SysOp(bits(3) op1, bits(4) CRn, bits(4) CRm, bits(3) op2)
    case op1:CRn:CRm:op2 of
        when '000 0111 1000 000' return Sys_AT;   // S1E1R
        when '100 0111 1000 000' return Sys_AT;   // S1E2R
        when '110 0111 1000 000' return Sys_AT;   // S1E3R
        when '000 0111 1000 001' return Sys_AT;   // S1E1W
        when '100 0111 1000 001' return Sys_AT;   // S1E2W
        when '110 0111 1000 001' return Sys_AT;   // S1E3W
        when '000 0111 1000 010' return Sys_AT;   // S1E0R
        when '000 0111 1000 011' return Sys_AT;   // S1E0W
        when '100 0111 1000 100' return Sys_AT;   // S12E1R
        when '100 0111 1000 101' return Sys_AT;   // S12E1W
        when '100 0111 1000 110' return Sys_AT;   // S12E0R
        when '100 0111 1000 111' return Sys_AT;   // S12E0W
        when '011 0111 0100 001' return Sys_DC;   // ZVA
        when '000 0111 0110 001' return Sys_DC;   // IVAC
        when '000 0111 0110 010' return Sys_DC;   // ISW
        when '011 0111 1010 001' return Sys_DC;   // CVAC
        when '000 0111 1010 010' return Sys_DC;   // CSW
        when '011 0111 1011 001' return Sys_DC;   // CVAU
        when '011 0111 1110 001' return Sys_DC;   // CIVAC
        when '000 0111 1110 010' return Sys_DC;   // CISW
        when '011 0111 1101 001' return Sys_DC;   // CVADP
        when '000 0111 0001 000' return Sys_IC;   // IALLUIS
        when '000 0111 0101 000' return Sys_IC;   // IALLU
        when '011 0111 0101 001' return Sys_IC;   // IVAU
        when '100 1000 0000 001' return Sys_TLBI; // IPAS2E1IS
        when '100 1000 0000 101' return Sys_TLBI; // IPAS2LE1IS
        when '000 1000 0011 000' return Sys_TLBI; // VMALLE1IS
        when '100 1000 0011 000' return Sys_TLBI; // ALLE2IS
        when '110 1000 0011 000' return Sys_TLBI; // ALLE3IS
        when '000 1000 0011 001' return Sys_TLBI; // VAE1IS
        when '100 1000 0011 001' return Sys_TLBI; // VAE2IS
        when '110 1000 0011 001' return Sys_TLBI; // VAE3IS
        when '000 1000 0011 010' return Sys_TLBI; // ASIDE1IS
        when '000 1000 0011 011' return Sys_TLBI; // VAAE1IS
        when '100 1000 0011 100' return Sys_TLBI; // ALLE1IS
        when '000 1000 0011 101' return Sys_TLBI; // VALE1IS
        when '100 1000 0011 101' return Sys_TLBI; // VALE2IS
        when '110 1000 0011 101' return Sys_TLBI; // VALE3IS
        when '100 1000 0011 110' return Sys_TLBI; // VMALLS12E1IS
        when '000 1000 0011 111' return Sys_TLBI; // VAALE1IS
        when '100 1000 0100 001' return Sys_TLBI; // IPAS2E1
        when '100 1000 0100 101' return Sys_TLBI; // IPAS2LE1
        when '000 1000 0111 000' return Sys_TLBI; // VMALLE1
        when '100 1000 0111 000' return Sys_TLBI; // ALLE2
        when '110 1000 0111 000' return Sys_TLBI; // ALLE3
        when '000 1000 0111 001' return Sys_TLBI; // VAE1
        when '100 1000 0111 001' return Sys_TLBI; // VAE2
        when '110 1000 0111 001' return Sys_TLBI; // VAE3
        when '000 1000 0111 010' return Sys_TLBI; // ASIDE1
        when '000 1000 0111 011' return Sys_TLBI; // VAAE1
        when '100 1000 0111 100' return Sys_TLBI; // ALLE1
        when '000 1000 0111 101' return Sys_TLBI; // VALE1
        when '100 1000 0111 101' return Sys_TLBI; // VALE2
        when '110 1000 0111 101' return Sys_TLBI; // VALE3
        when '100 1000 0111 110' return Sys_TLBI; // VMALLS12E1
        when '000 1000 0111 111' return Sys_TLBI; // VAALE1
    return Sys_SYS;
// Classification of SYS instruction encodings.
enumeration SystemOp {Sys_AT, Sys_DC, Sys_IC, Sys_TLBI, Sys_SYS};

// ASID value used when an operation applies regardless of ASID.
constant bits(16) ASID_NONE = Zeros();
constant bits(16)// Broadcast
// =========
// IMPLEMENTATION DEFINED function to broadcast TLBI operation within the indicated shareability
// domain.
Broadcast( ASID_NONE = Zeros();shareability, TLBIRecord r)
IMPLEMENTATION_DEFINED;
// Broadcast
// =========
// IMPLEMENTATION DEFINED function to broadcast TLBI operation within the indicated shareability
// domain.
// HasLargeAddress()
// =================
// Returns TRUE if the regime is configured for 52 bit addresses, FALSE otherwise.

boolean HasLargeAddress(Regime regime)
    if !Have52BitIPAAndPASpaceExt() then
        return FALSE;
    case regime of
        when Regime_EL3
            return TCR_EL3<32> == '1';   // TCR_EL3.DS
        when Regime_EL2
            return TCR_EL2<32> == '1';   // TCR_EL2.DS
        when Regime_EL20
            return TCR_EL2<59> == '1';   // TCR_EL2.DS (E2H format)
        when Regime_EL10
            return TCR_EL1<59> == '1';   // TCR_EL1.DS
        otherwise
            Unreachable();
// Translation regimes.
enumeration Regime {
    Regime_EL10, // EL1&0
    Regime_EL20, // EL2&0
    Regime_EL2,  // EL2
    Regime_EL3   // EL3
};
// Security states.
enumeration SecurityState {
    SS_NonSecure,
    SS_Secure
};
// SecurityStateAtEL()
// ===================
// Returns the effective security state at the exception level based off current settings.

SecurityState SecurityStateAtEL(bits(2) EL)
    if !HaveEL(EL3) then
        if boolean IMPLEMENTATION_DEFINED "Secure-only implementation" then
            return SS_Secure;
        else
            return SS_NonSecure;
    elsif EL == EL3 then
        return SS_Secure;
    else
        // For EL2 call only when EL2 is enabled in current security state
        assert(EL != EL2 || EL2Enabled());
        if !ELUsingAArch32(EL3) then
            return if SCR_EL3.NS == '1' then SS_NonSecure else SS_Secure;
        else
            return if SCR.NS == '1' then SS_NonSecure else SS_Secure;
// Shareability domains.
// NOTE(review): the diffed source also shows member names Shareability_None/Inner/Outer
// from the other revision — confirm which naming the final file uses.
enumeration Shareability {
    Shareability_NSH,
    Shareability_ISH,
    Shareability_OSH
};
// TLBI
// ====
// IMPLEMENTATION DEFINED TLBI function.

TLBI(TLBIRecord r)
    IMPLEMENTATION_DEFINED;
// Scope of a TLBI operation with respect to translation table levels.
enumeration TLBILevel {
    TLBILevel_Any,  // this applies to TLB entries at all levels
    TLBILevel_Last  // this applies to TLB entries at last level only
};
enumerationenumeration TLBIOp {
TLBIOp_ALL,
TLBIOp_ASID,
TLBIOp_IPAS2,
TLBIOp_VAA,
TLBIOp_VA,
TLBIOp_VMALL,
TLBIOp_VMALLS12,
TLBIOp_RIPAS2,
TLBIOp_RVAA,
TLBIOp_RVA,
}; TLBILevel {
TLBILevel_Any,
TLBILevel_Last
};
enumeration TLBIOp {
TLBIOp_ALL,
TLBIOp_ASID,
TLBIOp_IPAS2,
TLBIOp_VAA,
TLBIOp_VA,
TLBIOp_VMALL,
TLBIOp_VMALLS12,
TLBIOp_RIPAS2,
TLBIOp_RVAA,
TLBIOp_RVA,
};type TLBIRecord is (
TLBIOp op,SecurityState security,
Regime regime,
bits(16) vmid,
bits(16) asid,
TLBILevel level,
TLBI_MemAttr attr,
FullAddress address, // VA/IPA/BaseAddress
bits(64) end_address, // for range operations, end address
bits(2) tg, // for range - the TG parameter
bits(4) ttl,
)
type TLBIRecord is (
TLBIOp op,// TLBI_ALL()
// ==========
// Invalidates all entries for the indicated translation regime with the
// the indicated security state for all TLBs within the indicated shareability domain.
// Invalidation applies to all applicable stage 1 and stage 2 entries.
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.
TLBI_ALL(SecurityState security,
Regime regime,
bits(16) vmid,
bits(16) asid,regime,
TLBILevelShareability level,shareability,
TLBI_MemAttr attr,attr)
assert PSTATE.EL IN {
, EL2};
TLBIRecord r;
r.op = TLBIOp_ALL;
r.security = security;
r.regime = regime;
r.level = TLBILevel_Any;
r.attr = attr;
TLBI(r);
if shareability != Shareability_NoneFullAddressEL3 address, // VA/IPA/BaseAddress
bits(64) end_address, // for range operations, end address
bits(2) tg, // for range - the TG parameter
bits(4) ttl,
)then Broadcast(shareability, r);
return;
// TLBI_ALL()
// ==========
// Invalidates all entries for the indicated translation regime with the
// the indicated security state for all TLBs within the indicated shareability domain.
// Invalidation applies to all applicable stage 1 and stage 2 entries.
// TLBI_ASID()
// ===========
// Invalidates all stage 1 entries matching the indicated VMID (where regime supports)
// and ASID in the parameter Xt in the indicated translation regime with the
// indicated security state for all TLBs within the indicated shareability domain.
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.
TLBI_ALL(TLBI_ASID(SecurityState security, Regime regime,regime, bits(16) vmid, Shareability shareability, TLBI_MemAttr attr)
attr, bits(64) Xt)
assert PSTATE.EL IN {EL3, EL2};
TLBIRecord r;
r.op = TLBIOp_ALL;
r.security = security;
r.regime = regime;
r.level =, EL1};
TLBIRecord r;
r.op = TLBIOp_ALL;
r.security = security;
r.regime = regime;
r.vmid = vmid;
r.level = TLBILevel_Any;
r.attr = attr;
r.asid = Xt<63:48>;
TLBI(r);
if shareability != Shareability_NSHShareability_None then Broadcast(shareability, r);
return;
// TLBI_ASID()
// ===========
// Invalidates all stage 1 entries matching the indicated VMID (where regime supports)
// and ASID in the parameter Xt in the indicated translation regime with the
// indicated security state for all TLBs within the indicated shareability domain.
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// TLBI_IPAS2()
// ============
// Invalidate by IPA all stage 2 only TLB entries in the indicated shareability
// domain matching the indicated VMID in the indicated regime with the indicated security state.
// Note: stage 1 and stage 2 combined entries are not in the scope of this operation.
// IPA and related parameters of the are derived from Xt.
// When the indicated level is
// TLBILevel_Any : this applies to TLB entries at all levels
// TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.
TLBI_ASID(TLBI_IPAS2(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability,
TLBILevel level, TLBI_MemAttr attr, bits(64) Xt)
assert PSTATE.EL IN {EL3, EL2,};
TLBIRecord r;
r.op = TLBIOp_IPAS2;
r.security = security;
r.regime = regime;
r.vmid = vmid;
r.level = level;
r.attr = attr;
r.ttl = Xt<47:44>;
r.address.address = Xt<39:0> : EL1Zeros};
TLBIRecord r;
r.op = TLBIOp_ALL;
r.security = security;
r.regime = regime;
r.vmid = vmid;
r.level =(12);
r.address.NS = if security == TLBILevel_AnySS_NonSecure;
r.attr = attr;
r.asid = Xt<63:48>;
then '1' else Xt<63>;
TLBI(r);
if shareability != Shareability_NSHShareability_None then Broadcast(shareability, r);
return;
// TLBI_IPAS2()
// ============
// Invalidate by IPA all stage 2 only TLB entries in the indicated shareability
// domain matching the indicated VMID in the indicated regime with the indicated security state.
// Note: stage 1 and stage 2 combined entries are not in the scope of this operation.
// IPA and related parameters of the are derived from Xt.
// When the indicated level is
// TLBILevel_Any : this applies to TLB entries at all levels
// TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.enumeration
TLBI_IPAS2(TLBI_MemAttr {SecurityState security,TLBI_AllAttr, Regime regime, bits(16) vmid,
Shareability shareability, TLBILevel level, TLBI_MemAttr attr, bits(64) Xt)
assert PSTATE.EL IN {EL3, EL2};
TLBIRecord r;
r.op = TLBIOp_IPAS2;
r.security = security;
r.regime = regime;
r.vmid = vmid;
r.level = level;
r.attr = attr;
r.ttl = Xt<47:44>;
r.address.address = Xt<39:0> : Zeros(12);
r.address.NS = if security == SS_NonSecure then '1' else Xt<63>;
TLBI(r);
if shareability != Shareability_NSH then Broadcast(shareability, r);
return;TLBI_ExcludeXS
};
enumeration// TLBI_RIPAS2()
// =============
// Range invalidate by IPA all stage 2 only TLB entries in the indicated
// shareability domain matching the indicated VMID in the indicated regime with the indicated
// security state.
// Note: stage 1 and stage 2 combined entries are not in the scope of this operation.
// The range of IPA and related parameters of the are derived from Xt.
// When the indicated level is
// TLBILevel_Any : this applies to TLB entries at all levels
// TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete. TLBI_MemAttr {TLBI_RIPAS2(
TLBI_AllAttr,security,
regime, bits(16) vmid,
Shareability shareability, TLBILevel level, TLBI_MemAttr attr, bits(64) Xt)
assert PSTATE.EL IN {EL3, EL2, EL1};
TLBIRecord r;
r.op = TLBIOp_RIPAS2;
r.security = security;
r.regime = regime;
r.vmid = vmid;
r.level = level;
r.attr = attr;
r.ttl = Xt<47:44>;
bits(2) tg = Xt<47:46>;
integer scale = UInt(Xt<45:44>);
integer num = UInt(Xt<43:39>);
integer baseaddr = SInt(Xt<36:0>);
bits(64) start_addres;
boolean valid;
(valid, r.tg, start_address, r.end_address) = TLBI_Range(regime, Xt);
if !valid then return;
r.address.address = start_address<51:0>;
TLBI(r);
if shareability != Shareability_NoneTLBI_ExcludeXS
};then Broadcast(shareability, r);
return;
// TLBI_RIPAS2()
// =============
// Range invalidate by IPA all stage 2 only TLB entries in the indicated
// shareability domain matching the indicated VMID in the indicated regime with the indicated
// security state.
// Note: stage 1 and stage 2 combined entries are not in the scope of this operation.
// The range of IPA and related parameters of the are derived from Xt.
// TLBI_RVA()
// ==========
// Range invalidate by VA range all stage 1 TLB entries in the indicated
// shareability domain matching the indicated VMID and ASID (where regime
// supports VMID, ASID) in the indicated regime with the indicated security state.
// ASID, and range related parameters are derived from Xt.
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// When the indicated level is
// TLBILevel_Any : this applies to TLB entries at all levels
// TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.
TLBI_RIPAS2(TLBI_RVA(SecurityState security, Regime regime, bits(16) vmid,
Shareability shareability, TLBILevel level, TLBI_MemAttr attr, bits(64) Xt)
assert PSTATE.EL IN {EL3, EL2, EL1};
TLBIRecord r;
r.op = TLBIOp_RIPAS2;
r.op = TLBIOp_RVA;
r.security = security;
r.regime = regime;
r.vmid = vmid;
r.level = level;
r.attr = attr;
r.asid = Xt<63:48>;
r.ttl = Xt<47:44>;
bits(2) tg = Xt<47:46>;
integer scale = bits(64) start_addres;
boolean valid;
(valid, r.tg, start_address, r.end_address) = UIntTLBI_Range(Xt<45:44>);
integer num =(regime, Xt);
if !valid then return;
r.address.address = start_address<51:0>;
r.address.NS = if security == UIntSS_NonSecure(Xt<43:39>);
integer baseaddr =then '1' else Xt<63>;
TLBI(r);
if shareability != SIntShareability_None(Xt<36:0>);
bits(64) start_addres;
boolean valid;
(valid, r.tg, start_address, r.end_address) = TLBI_Range(regime, Xt);
if !valid then return;
r.address.address = start_address<51:0>;
TLBI(r);
if shareability != Shareability_NSH then Broadcast(shareability, r);
return;
// TLBI_RVA()
// ==========
// TLBI_RVAA()
// ===========
// Range invalidate by VA range all stage 1 TLB entries in the indicated
// shareability domain matching the indicated VMID and ASID (where regime
// supports VMID, ASID) in the indicated regime with the indicated security state.
// ASID, and range related parameters are derived from Xt.
// shareability domain matching the indicated VMID (where regimesupports VMID)
// and all ASID in the indicated regime with the indicated security state.
// VA range related parameters are derived from Xt.
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// When the indicated level is
// TLBILevel_Any : this applies to TLB entries at all levels
// TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.
TLBI_RVA(TLBI_RVAA(SecurityState security, Regime regime, bits(16) vmid,
Shareability shareability, TLBILevel level, TLBI_MemAttr attr, bits(64) Xt)
assert PSTATE.EL IN {EL3, EL2, EL1};
TLBIRecord r;
r.op = TLBIOp_RVA;
r.op = TLBIOp_RVAA;
r.security = security;
r.regime = regime;
r.vmid = vmid;
r.level = level;
r.attr = attr;
r.asid = Xt<63:48>;
r.ttl = Xt<47:44>;
bits(64) start_addres;
boolean valid;
(valid, r.tg, start_address, r.end_address) = bits(2) tg = Xt<47:46>;
integer scale = UInt(Xt<45:44>);
integer num = UInt(Xt<43:39>);
integer baseaddr = SInt(Xt<36:0>);
bits(64) start_addres;
boolean valid;
(valid, r.tg, start_address, r.end_address) = TLBI_Range(regime, Xt);
if !valid then return;
r.address.address = start_address<51:0>;
r.address.NS = if security == TLBI(r);
if shareability != SS_NonSecureShareability_None then '1' else Xt<63>;
TLBI(r);
if shareability != Shareability_NSH then Broadcast(shareability, r);
return;
// TLBI_RVAA()
// ===========
// Range invalidate by VA range all stage 1 TLB entries in the indicated
// shareability domain matching the indicated VMID (where regimesupports VMID)
// and all ASID in the indicated regime with the indicated security state.
// VA range related parameters are derived from Xt.
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// When the indicated level is
// TLBILevel_Any : this applies to TLB entries at all levels
// TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.// TLBI_Range()
// ============
// Extract the input address range information from encoded Xt.
(boolean, bits(2), bits(64), bits(64))
TLBI_RVAA(TLBI_Range(SecurityState security, Regime regime, bits(16) vmid,regime, bits(64) Xt)
boolean valid = TRUE;
bits(64) start =
ShareabilityZeros shareability,(64);
bits(64) end = TLBILevelZeros level,(64);
bits(2) tg = Xt<47:46>;
integer scale = TLBI_MemAttrUInt attr, bits(64) Xt)
assert PSTATE.EL IN {(Xt<45:44>);
integer num =EL3UInt,(Xt<43:39>);
integer tg_bits;
if tg == '00' then
return (FALSE, tg, start, end);
case tg of
when '01' // 4KB
tg_bits = 12;
if EL2HasLargeAddress,(regime) then
start<52:16> = Xt<36:0>;
start<63:53> = EL1Replicate};
TLBIRecord r;
r.op = TLBIOp_RVAA;
r.security = security;
r.regime = regime;
r.vmid = vmid;
r.level = level;
r.attr = attr;
r.ttl = Xt<47:44>;
bits(2) tg = Xt<47:46>;
integer scale =(Xt<36>, 11);
else
start<48:12> = Xt<36:0>;
start<63:49> = UIntReplicate(Xt<45:44>);
integer num =(Xt<36>, 15);
when '10' // 16KB
tg_bits = 14;
if UIntHasLargeAddress(Xt<43:39>);
integer baseaddr =(regime) then
start<52:16> = Xt<36:0>;
start<63:53> = SIntReplicate(Xt<36:0>);
bits(64) start_addres;
boolean valid;
(valid, r.tg, start_address, r.end_address) =(Xt<36>, 11);
else
start<50:14> = Xt<36:0>;
start<63:51> = TLBI_RangeReplicate(regime, Xt);
if !valid then return;
r.address.address = start_address<51:0>;
TLBI(r);
if shareability !=(Xt<36>, 13);
when '11' // 64KB
tg_bits = 16;
start<52:16> = Xt<36:0>;
start<63:53> = (Xt<36>, 11);
otherwise
Unreachable();
integer range = (num+1) << (5*scale + 1 + tg_bits);
end = start + range<63:0>;
if end<52> != start<52> then
// overflow, saturate it
end = Replicate(start<52>, 64-52) : OnesShareability_NSHReplicate then Broadcast(shareability, r);
return;(52);
return (valid, tg, start, end);
// TLBI_Range()
// ============
// Extract the input address range information from encoded Xt.
(boolean, bits(2), bits(64), bits(64))// TLBI_VA()
// =========
// Invalidate by VA all stage 1 TLB entries in the indicated shareability domain
// matching the indicated VMID and ASID (where regime supports VMID, ASID) in the indicated regime
// with the indicated security state.
// ASID, VA and related parameters are derived from Xt.
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// When the indicated level is
// TLBILevel_Any : this applies to TLB entries at all levels
// TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete. TLBI_Range(TLBI_VA(SecurityState security, Regime regime, bits(64) Xt)
boolean valid = TRUE;
bits(64) start =regime, bits(16) vmid, ZerosShareability(64);
bits(64) end =shareability, ZerosTLBILevel(64);
bits(2) tg = Xt<47:46>;
integer scale =level, UIntTLBI_MemAttr(Xt<45:44>);
integer num =attr, bits(64) Xt)
assert PSTATE.EL IN { UIntEL3(Xt<43:39>);
integer tg_bits;
if tg == '00' then
return (FALSE, tg, start, end);
case tg of
when '01' // 4KB
tg_bits = 12;
if, HasLargeAddressEL2(regime) then
start<52:16> = Xt<36:0>;
start<63:53> =, ReplicateEL1(Xt<36>, 11);
else
start<48:12> = Xt<36:0>;
start<63:49> =};
TLBIRecord r;
r.op = TLBIOp_VA;
r.security = security;
r.regime = regime;
r.vmid = vmid;
r.level = level;
r.attr = attr;
r.asid = Xt<63:48>;
r.ttl = Xt<47:44>;
r.address.address = Xt<39:0> : ReplicateZeros(Xt<36>, 15);
when '10' // 16KB
tg_bits = 14;
if(12);
TLBI(r);
if shareability != HasLargeAddressShareability_None(regime) then
start<52:16> = Xt<36:0>;
start<63:53> = Replicate(Xt<36>, 11);
else
start<50:14> = Xt<36:0>;
start<63:51> = Replicate(Xt<36>, 13);
when '11' // 64KB
tg_bits = 16;
start<52:16> = Xt<36:0>;
start<63:53> = Replicate(Xt<36>, 11);
otherwise
Unreachable();
integer range = (num+1) << (5*scale + 1 + tg_bits);
end = start + range<63:0>;
if end<52> != start<52> then
// overflow, saturate it
end = Replicate(start<52>, 64-52) : Ones(52);
return (valid, tg, start, end);then Broadcast(shareability, r);
return;
// TLBI_VA()
// =========
// TLBI_VAA()
// ==========
// Invalidate by VA all stage 1 TLB entries in the indicated shareability domain
// matching the indicated VMID and ASID (where regime supports VMID, ASID) in the indicated regime
// matching the indicated VMID (where regime supports VMID) and all ASID in the indicated regime
// with the indicated security state.
// ASID, VA and related parameters are derived from Xt.
// VA and related parameters are derived from Xt.
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// When the indicated level is
// TLBILevel_Any : this applies to TLB entries at all levels
// TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.
TLBI_VA(TLBI_VAA(SecurityState security, Regime regime, bits(16) vmid,
Shareability shareability, TLBILevel level, TLBI_MemAttr attr, bits(64) Xt)
assert PSTATE.EL IN {EL3, EL2, EL1};
TLBIRecord r;
r.op = TLBIOp_VA;
r.op = TLBIOp_VAA;
r.security = security;
r.regime = regime;
r.vmid = vmid;
r.level = level;
r.attr = attr;
r.asid = Xt<63:48>;
r.ttl = Xt<47:44>;
r.address.address = Xt<39:0> : Zeros(12);
TLBI(r);
if shareability != Shareability_NSHShareability_None then Broadcast(shareability, r);
return;
// TLBI_VAA()
// ==========
// Invalidate by VA all stage 1 TLB entries in the indicated shareability domain
// matching the indicated VMID (where regime supports VMID) and all ASID in the indicated regime
// with the indicated security state.
// VA and related parameters are derived from Xt.
// TLBI_VMALL()
// ============
// Invalidates all stage 1 entries for the indicated translation regime with the
// the indicated security state for all TLBs within the indicated shareability
// domain that match the indicated VMID (where applicable).
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// When the indicated level is
// TLBILevel_Any : this applies to TLB entries at all levels
// TLBILevel_Last : this applies to TLB entries at last level only
// Note: stage 2 only entries are not in the scope of this operation.
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.
TLBI_VAA(TLBI_VMALL(SecurityState security, Regime regime, bits(16) vmid,
Shareability shareability, TLBILevel level, TLBI_MemAttr attr, bits(64) Xt)
attr)
assert PSTATE.EL IN {EL3, EL2, EL1};
TLBIRecord r;
r.op = TLBIOp_VAA;
r.op = TLBIOp_VMALL;
r.security = security;
r.regime = regime;
r.vmid = vmid;
r.level = level;
r.attr = attr;
r.ttl = Xt<47:44>;
r.address.address = Xt<39:0> : r.level = ZerosTLBILevel_Any(12);
;
r.vmid = vmid;
r.attr = attr;
TLBI(r);
if shareability != Shareability_NSHShareability_None then Broadcast(shareability, r);
return;
// TLBI_VMALL()
// ============
// Invalidates all stage 1 entries for the indicated translation regime with the
// the indicated security state for all TLBs within the indicated shareability
// domain that match the indicated VMID (where applicable).
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// Note: stage 2 only entries are not in the scope of this operation.
// TLBI_VMALLS12()
// ===============
// Invalidates all stage 1 and stage 2 entries for the indicated translation
// regime with the indicated security state for all TLBs within the indicated
// shareability domain that match the indicated VMID.
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.
TLBI_VMALL(TLBI_VMALLS12(SecurityState security, Regime regime, bits(16) vmid,
Shareability shareability, TLBI_MemAttr attr)
assert PSTATE.EL IN {EL3, EL2,};
TLBIRecord r;
r.op = TLBIOp_VMALLS12;
r.security = security;
r.regime = regime;
r.level = EL1};
TLBIRecord r;
r.op = TLBIOp_VMALL;
r.security = security;
r.regime = regime;
r.level = TLBILevel_Any;
r.vmid = vmid;
r.attr = attr;
TLBI(r);
if shareability != Shareability_NSHShareability_None then Broadcast(shareability, r);
return;
// TLBI_VMALLS12()
// ===============
// Invalidates all stage 1 and stage 2 entries for the indicated translation
// regime with the indicated security state for all TLBs within the indicated
// shareability domain that match the indicated VMID.
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.// VMID[]
// ======
// Effective VMID.
bits(16)
TLBI_VMALLS12(VMID[]
ifSecurityStateEL2Enabled security,() then
return VTTBR_EL2.VMID;
elsif RegimeHaveEL regime, bits(16) vmid,(
ShareabilityEL2 shareability,) && TLBI_MemAttrHaveSecureEL2Ext attr)
assert PSTATE.EL IN {() then
returnEL3Zeros,(16);
else
return EL2VMID_NONE};
TLBIRecord r;
r.op = TLBIOp_VMALLS12;
r.security = security;
r.regime = regime;
r.level = TLBILevel_Any;
r.vmid = vmid;
r.attr = attr;
TLBI(r);
if shareability != Shareability_NSH then Broadcast(shareability, r);
return;;
// VMID[]
// ======
// Effective VMID.
bits(16)constant bits(16) VMID[]
ifVMID_NONE = EL2Enabled() then
return VTTBR_EL2.VMID;
elsif HaveEL(EL2) && HaveSecureEL2Ext() then
return Zeros(16);
else
return VMID_NONE;();
constant bits(16)enumeration VMID_NONE =VBitOp { VBitOp_VBIF, VBitOp_VBIT, VBitOp_VBSL, Zeros();VBitOp_VEOR};
enumeration VBitOp {CompareOp {VBitOp_VBIF,CompareOp_GT, VBitOp_VBIT,CompareOp_GE, VBitOp_VBSL,CompareOp_EQ, VBitOp_VEOR};CompareOp_LE,CompareOp_LT};
enumeration CompareOp {ImmediateOp {CompareOp_GT,ImmediateOp_MOVI, CompareOp_GE,ImmediateOp_MVNI, CompareOp_EQ,ImmediateOp_ORR,
CompareOp_LE,ImmediateOp_BIC}; CompareOp_LT};
enumeration// Reduce()
// ========
bits(esize) ImmediateOp {Reduce(ImmediateOp_MOVI,op, bits(N) input, integer esize)
boolean altfp = ImmediateOp_MVNI,() && !
UsingAArch32() && FPCR.AH == '1';
return Reduce(op, input, esize, altfp);
// Reduce()
// ========
// Perform the operation 'op' on pairs of elements from the input vector,
// reducing the vector to a scalar result. The 'altfp' argument controls
// alternative floating-point behaviour.
bits(esize) ImmediateOp_ORR,Reduce( op, bits(N) input, integer esize, boolean altfp)
integer half;
bits(esize) hi;
bits(esize) lo;
bits(esize) result;
if N == esize then
return input<esize-1:0>;
half = N DIV 2;
hi = Reduce(op, input<N-1:half>, esize, altfp);
lo = Reduce(op, input<half-1:0>, esize, altfp);
case op of
when ReduceOp_FMINNUM
result = FPMinNum(lo, hi, FPCR[]);
when ReduceOp_FMAXNUM
result = FPMaxNum(lo, hi, FPCR[]);
when ReduceOp_FMIN
result = FPMin(lo, hi, FPCR[], altfp);
when ReduceOp_FMAX
result = FPMax(lo, hi, FPCR[], altfp);
when ReduceOp_FADD
result = FPAdd(lo, hi, FPCR[]);
when ReduceOp_ADDImmediateOp_BIC};result = lo + hi;
return result;
// Reduce()
// ========
bits(esize)enumeration Reduce(ReduceOp {ReduceOp op, bits(N) input, integer esize)
boolean altfp =ReduceOp_FMINNUM, HaveAltFP() && !ReduceOp_FMAXNUM,UsingAArch32() && FPCR.AH == '1';
returnReduceOp_FMIN, Reduce(op, input, esize, altfp);
// Reduce()
// ========
// Perform the operation 'op' on pairs of elements from the input vector,
// reducing the vector to a scalar result. The 'altfp' argument controls
// alternative floating-point behaviour.
bits(esize)ReduceOp_FMAX, Reduce(ReduceOp_FADD,ReduceOp op, bits(N) input, integer esize, boolean altfp)
integer half;
bits(esize) hi;
bits(esize) lo;
bits(esize) result;
if N == esize then
return input<esize-1:0>;
half = N DIV 2;
hi = Reduce(op, input<N-1:half>, esize, altfp);
lo = Reduce(op, input<half-1:0>, esize, altfp);
case op of
when ReduceOp_FMINNUM
result = FPMinNum(lo, hi, FPCR[]);
when ReduceOp_FMAXNUM
result = FPMaxNum(lo, hi, FPCR[]);
when ReduceOp_FMIN
result = FPMin(lo, hi, FPCR[], altfp);
when ReduceOp_FMAX
result = FPMax(lo, hi, FPCR[], altfp);
when ReduceOp_FADD
result = FPAdd(lo, hi, FPCR[]);
when ReduceOp_ADD
result = lo + hi;
return result;ReduceOp_ADD};
enumeration// AArch64.CombineS1S2Desc()
// =========================
// Combines the address descriptors from stage 1 and stage 2
AddressDescriptor ReduceOp {AArch64.CombineS1S2Desc(ReduceOp_FMINNUM,s1desc, ReduceOp_FMAXNUM,s2desc,
ReduceOp_FMIN,s2acctype) ReduceOp_FMAX,result;
result.paddress = s2desc.paddress;
apply_force_writeback =
ReduceOp_FADD,() && HCR_EL2.FWB == '1';
if (s1desc) || IsFault(s2desc) then
result = if IsFault(s1desc) then s1desc else s2desc;
else
result.fault = AArch64.NoFault();
if s2desc.memattrs.memtype == MemType_Device || (
(apply_force_writeback && s1desc.memattrs.memtype == MemType_Device && s2desc.memattrs.inner.attrs != '10') ||
(!apply_force_writeback && s1desc.memattrs.memtype == MemType_Device) ) then
result.memattrs.memtype = MemType_Device;
if s1desc.memattrs.memtype == MemType_Normal then
result.memattrs.device = s2desc.memattrs.device;
elsif s2desc.memattrs.memtype == MemType_Normal then
result.memattrs.device = s1desc.memattrs.device;
else // Both Device
result.memattrs.device = CombineS1S2Device(s1desc.memattrs.device,
s2desc.memattrs.device);
result.memattrs.tagged = FALSE;
// S1 can be either Normal or Device, S2 is Normal.
else
result.memattrs.memtype = MemType_Normal;
result.memattrs.device = DeviceType UNKNOWN;
result.memattrs.inner = CombineS1S2AttrHints(s1desc.memattrs.inner, s2desc.memattrs.inner, s2acctype);
result.memattrs.outer = CombineS1S2AttrHints(s1desc.memattrs.outer, s2desc.memattrs.outer, s2acctype);
result.memattrs.shareable = (s1desc.memattrs.shareable || s2desc.memattrs.shareable);
result.memattrs.outershareable = (s1desc.memattrs.outershareable ||
s2desc.memattrs.outershareable);
result.memattrs.tagged = (s1desc.memattrs.tagged &&
result.memattrs.inner.attrs == MemAttr_WB &&
result.memattrs.inner.hints == MemHint_RWA &&
result.memattrs.outer.attrs == MemAttr_WB &&
result.memattrs.outer.hints == MemHint_RWA);
result.memattrs = MemAttrDefaultsReduceOp_ADD};(result.memattrs);
return result;
// AArch64.CombineS1S2Desc()
// =========================
// Combines the address descriptors from stage 1 and stage 2
// AArch64.InstructionDevice()
// ===========================
// Instruction fetches from memory marked as Device but not execute-never might generate a
// Permission Fault but are otherwise treated as if from Normal Non-cacheable memory.
AddressDescriptor AArch64.CombineS1S2Desc(AArch64.InstructionDevice(AddressDescriptor s1desc,addrdesc, bits(64) vaddress,
bits(52) ipaddress, integer level, AddressDescriptor s2desc, AccType s2acctype)acctype, boolean iswrite, boolean secondstage,
boolean s2fs1walk)
c =
AddressDescriptorConstrainUnpredictable result;
result.paddress = s2desc.paddress;
apply_force_writeback =( HaveStage2MemAttrControlUnpredictable_INSTRDEVICE() && HCR_EL2.FWB == '1';
if);
assert c IN { IsFaultConstraint_NONE(s1desc) ||, IsFaultConstraint_FAULT(s2desc) then
result = if};
if c == IsFaultConstraint_FAULT(s1desc) then s1desc else s2desc;
else
result.fault =then
addrdesc.fault = AArch64.NoFaultAArch64.PermissionFault();
if s2desc.memattrs.memtype ==(ipaddress, boolean UNKNOWN, level, acctype, iswrite,
secondstage, s2fs1walk);
else
addrdesc.memattrs.memtype = MemType_Device || (
(apply_force_writeback && s1desc.memattrs.memtype == MemType_Device && s2desc.memattrs.inner.attrs != '10') ||
(!apply_force_writeback && s1desc.memattrs.memtype == MemType_Device) ) then
result.memattrs.memtype = MemType_Device;
if s1desc.memattrs.memtype == MemType_Normal then
result.memattrs.device = s2desc.memattrs.device;
elsif s2desc.memattrs.memtype ==;
addrdesc.memattrs.inner.attrs = MemType_NormalMemAttr_NC then
result.memattrs.device = s1desc.memattrs.device;
else // Both Device
result.memattrs.device =;
addrdesc.memattrs.inner.hints = CombineS1S2DeviceMemHint_No(s1desc.memattrs.device,
s2desc.memattrs.device);
result.memattrs.tagged = FALSE;
// S1 can be either Normal or Device, S2 is Normal.
else
result.memattrs.memtype = MemType_Normal;
result.memattrs.device = DeviceType UNKNOWN;
result.memattrs.inner = CombineS1S2AttrHints(s1desc.memattrs.inner, s2desc.memattrs.inner, s2acctype, s1desc.memattrs.memtype);
result.memattrs.outer = CombineS1S2AttrHints(s1desc.memattrs.outer, s2desc.memattrs.outer, s2acctype, s1desc.memattrs.memtype);
result.memattrs.shareable = (s1desc.memattrs.shareable || s2desc.memattrs.shareable);
result.memattrs.outershareable = (s1desc.memattrs.outershareable ||
s2desc.memattrs.outershareable);
result.memattrs.tagged = (s1desc.memattrs.tagged &&
result.memattrs.inner.attrs == MemAttr_WB &&
result.memattrs.inner.hints == MemHint_RWA &&
result.memattrs.outer.attrs == MemAttr_WB &&
result.memattrs.outer.hints == MemHint_RWA);
result.memattrs =;
addrdesc.memattrs.outer = addrdesc.memattrs.inner;
addrdesc.memattrs.tagged = FALSE;
addrdesc.memattrs = MemAttrDefaults(result.memattrs);
(addrdesc.memattrs);
return result; return addrdesc;
// AArch64.S1AttrDecode()
// ======================
// Converts the Stage 1 attribute fields, using the MAIR, to orthogonal
// attributes and hints.

MemoryAttributes AArch64.S1AttrDecode(bits(2) SH, bits(3) attr, AccType acctype)

    MemoryAttributes memattrs;

    // Select the 8-bit attribute field for this translation from MAIR_ELx.
    mair = MAIR[];
    index = 8 * UInt(attr);
    attrfield = mair<index+7:index>;

    memattrs.tagged = FALSE;
    if ((attrfield<7:4> != '0000' && attrfield<7:4> != '1111' && attrfield<3:0> == '0000') ||
        (attrfield<7:4> == '0000' && attrfield<3:0> != 'xx00')) then
        // Reserved, maps to an allocated value
        (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIR);
    if !HaveMTEExt() && attrfield<7:4> == '1111' && attrfield<3:0> == '0000' then
        // Without MTE, the Tagged Normal encoding is Reserved and maps to an allocated value
        (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIR);

    if attrfield<7:4> == '0000' then            // Device
        memattrs.memtype = MemType_Device;
        case attrfield<3:0> of
            when '0000'  memattrs.device = DeviceType_nGnRnE;
            when '0100'  memattrs.device = DeviceType_nGnRE;
            when '1000'  memattrs.device = DeviceType_nGRE;
            when '1100'  memattrs.device = DeviceType_GRE;
            otherwise    Unreachable();         // Reserved, handled above

    elsif attrfield<3:0> != '0000' then         // Normal
        memattrs.memtype = MemType_Normal;
        memattrs.outer = LongConvertAttrsHints(attrfield<7:4>, acctype);
        memattrs.inner = LongConvertAttrsHints(attrfield<3:0>, acctype);
        memattrs.shareable = SH<1> == '1';
        memattrs.outershareable = SH == '10';
    elsif HaveMTEExt() && attrfield == '11110000' then // Normal, Tagged WB-RWA
        memattrs.memtype = MemType_Normal;
        memattrs.outer = LongConvertAttrsHints('1111', acctype); // WB_RWA
        memattrs.inner = LongConvertAttrsHints('1111', acctype); // WB_RWA
        memattrs.shareable = SH<1> == '1';
        memattrs.outershareable = SH == '10';
        memattrs.tagged = TRUE;
    else
        Unreachable();                          // Reserved, handled above

    // When stage 2 translation applies (and this is not an NV2-redirected
    // register access), the stage 1 attributes are combined with stage 2
    // later, so defaults are not applied here.
    if ((HCR_EL2.VM == '1' || HCR_EL2.DC == '1') &&
        (PSTATE.EL == EL1 || (PSTATE.EL == EL0 && HCR_EL2.TGE == '0')) &&
        acctype != AccType_NV2REGISTER) then
        return memattrs;
    else
        return MemAttrDefaults(memattrs);
// AArch64.TranslateAddressS1Off()
// ===============================
// Called for stage 1 translations when translation is disabled to supply a default translation.
// Note that there are additional constraints on instruction prefetching that are not described in
// this pseudocode.

TLBRecord AArch64.TranslateAddressS1Off(bits(64) vaddress, AccType acctype, boolean iswrite)
    assert !ELUsingAArch32(S1TranslationRegime());

    TLBRecord result;
    result.descupdate.AF = FALSE;
    result.descupdate.AP = FALSE;
    // With translation off, out-of-range addresses generate an Address Size fault at level 0.
    Top = AddrTop(vaddress, (acctype == AccType_IFETCH), PSTATE.EL);
    if !IsZero(vaddress<Top:PAMax()>) then
        level = 0;
        ipaddress = bits(52) UNKNOWN;
        secondstage = FALSE;
        s2fs1walk = FALSE;
        result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress,boolean UNKNOWN, level, acctype,
                                                         iswrite, secondstage, s2fs1walk);
        return result;

    default_cacheable = (HasS2Translation() && HCR_EL2.DC == '1');

    if default_cacheable then
        // Use default cacheable settings
        result.addrdesc.memattrs.memtype = MemType_Normal;
        result.addrdesc.memattrs.inner.attrs = MemAttr_WB;      // Write-back
        result.addrdesc.memattrs.inner.hints = MemHint_RWA;
        result.addrdesc.memattrs.shareable = FALSE;
        result.addrdesc.memattrs.outershareable = FALSE;
        result.addrdesc.memattrs.tagged = HCR_EL2.DCT == '1';
    elsif acctype != AccType_IFETCH then
        // Treat data as Device
        result.addrdesc.memattrs.memtype = MemType_Device;
        result.addrdesc.memattrs.device = DeviceType_nGnRnE;
        result.addrdesc.memattrs.inner = MemAttrHints UNKNOWN;
        result.addrdesc.memattrs.tagged = FALSE;
    else
        // Instruction cacheability controlled by SCTLR_ELx.I
        cacheable = SCTLR[].I == '1';
        result.addrdesc.memattrs.memtype = MemType_Normal;
        if cacheable then
            result.addrdesc.memattrs.inner.attrs = MemAttr_WT;
            result.addrdesc.memattrs.inner.hints = MemHint_RA;
        else
            result.addrdesc.memattrs.inner.attrs = MemAttr_NC;
            result.addrdesc.memattrs.inner.hints = MemHint_No;
        result.addrdesc.memattrs.shareable = TRUE;
        result.addrdesc.memattrs.outershareable = TRUE;
        result.addrdesc.memattrs.tagged = FALSE;
    // Outer attributes mirror inner for the default translation.
    result.addrdesc.memattrs.outer = result.addrdesc.memattrs.inner;
    result.addrdesc.memattrs = MemAttrDefaults(result.addrdesc.memattrs);

    result.perms.ap = bits(3) UNKNOWN;
    result.perms.xn = '0';
    result.perms.pxn = '0';

    result.nG = bit UNKNOWN;
    result.contiguous = boolean UNKNOWN;
    result.domain = bits(4) UNKNOWN;
    result.level = integer UNKNOWN;
    result.blocksize = integer UNKNOWN;
    // Flat (identity) mapping: physical address equals the virtual address.
    result.addrdesc.paddress.address = vaddress<51:0>;
    result.addrdesc.paddress.NS = if IsSecure() then '0' else '1';
    result.addrdesc.fault = AArch64.NoFault();
    result.descupdate.descaddr = result.addrdesc;
    return result;
// AArch64.AccessUsesEL()
// ======================
// Returns the Exception Level of the regime that will manage the translation for a given access type.

bits(2) AArch64.AccessUsesEL(AccType acctype)
    if acctype == AccType_UNPRIV then
        // Unprivileged (LDTR/STTR-style) accesses always use the EL0 regime.
        return EL0;
    elsif acctype == AccType_NV2REGISTER then
        // Nested-virtualization register accesses redirected by HCR_EL2.NV2 use EL2.
        return EL2;
    else
        return PSTATE.EL;
// AArch64.CheckPermission()
// =========================
// Function used for permission checking from AArch64 stage 1 translations

FaultRecord AArch64.CheckPermission(Permissions perms, bits(64) vaddress, integer level,
                                    bit NS, AccType acctype, boolean iswrite)
    assert !ELUsingAArch32(S1TranslationRegime());

    wxn = SCTLR[].WXN == '1';
    if (PSTATE.EL == EL0 || IsInHost() ||
        (PSTATE.EL == EL1 && !HaveNV2Ext()) ||
        (PSTATE.EL == EL1 && HaveNV2Ext() && (acctype != AccType_NV2REGISTER || !ELIsInHost(EL2)))) then
        priv_r = TRUE;
        priv_w = perms.ap<2> == '0';
        user_r = perms.ap<1> == '1';
        user_w = perms.ap<2:1> == '01';
        ispriv = AArch64.AccessUsesEL(acctype) != EL0;
        user_xn = perms.xn == '1' || (user_w && wxn);
        priv_xn = perms.pxn == '1' || (priv_w && wxn) || user_w;
        pan = if HavePANExt() then PSTATE.PAN else '0';
        epan = if HavePAN3Ext() then SCTLR[].EPAN else '0';
        // Restriction on Secure instruction fetch: an implementation may apply the
        // SCR_EL3.SIF restriction before the PAN3 execute-permission check below.
        if boolean IMPLEMENTATION_DEFINED "SCR_EL3.SIF affects PAN3 execute permission check" then
            if HaveEL(EL3) && IsSecure() && NS == '1' && SCR_EL3.SIF == '1' then
                user_xn = TRUE;
                priv_xn = TRUE;
        // Nested virtualization can force PAN to be treated as clear.
        if (EL2Enabled() && ((PSTATE.EL == EL1 && HaveNVExt() && HCR_EL2.<NV, NV1> == '11') ||
            (HaveNV2Ext() && acctype == AccType_NV2REGISTER && HCR_EL2.NV2 == '1'))) then
            pan = '0';
        is_ldst   = !(acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_AT, AccType_IFETCH});
        is_ats1xp = (acctype == AccType_AT && AArch64.ExecutingATS1xPInstr());
        if (pan == '1' && (user_r || (epan == '1' && !user_xn)) &&
            ispriv && (is_ldst || is_ats1xp)) then
            priv_r = FALSE;
            priv_w = FALSE;

        if ispriv then
            (r, w, xn) = (priv_r, priv_w, priv_xn);
        else
            (r, w, xn) = (user_r, user_w, user_xn);
    else
        // Access from EL2 or EL3
        r = TRUE;
        w = perms.ap<2> == '0';
        xn = perms.xn == '1' || (w && wxn);

    // Restriction on Secure instruction fetch, when not already applied above.
    if !boolean IMPLEMENTATION_DEFINED "SCR_EL3.SIF affects PAN3 execute permission check" then
        if HaveEL(EL3) && IsSecure() && NS == '1' && SCR_EL3.SIF == '1' then
            xn = TRUE;

    if acctype == AccType_IFETCH then
        fail = xn;
        failedread = TRUE;
    elsif acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW } then
        fail = !r || !w;
        failedread = !r;
    elsif iswrite then
        fail = !w;
        failedread = FALSE;
    elsif acctype == AccType_DC && PSTATE.EL != EL0 then
        // DC maintenance instructions operating by VA, cannot fault from stage 1 translation,
        // other than DC IVAC, which requires write permission, and operations executed at EL0,
        // which require read permission.
        fail = FALSE;
    else
        fail = !r;
        failedread = TRUE;

    if fail then
        secondstage = FALSE;
        s2fs1walk = FALSE;
        ipaddress = bits(52) UNKNOWN;
        return AArch64.PermissionFault(ipaddress,boolean UNKNOWN, level, acctype,
                                       !failedread, secondstage, s2fs1walk);
    else
        return AArch64.NoFault();
// AArch64.CheckS2Permission()
// ===========================
// Function used for permission checking from AArch64 stage 2 translations

FaultRecord AArch64.CheckS2Permission(Permissions perms, bits(64) vaddress, bits(52) ipaddress,
                                      integer level, AccType acctype, boolean iswrite, boolean NS,
                                      boolean s2fs1walk, boolean hwupdatewalk)

    assert (IsSecureEL2Enabled() || (HaveEL(EL2) && !IsSecure() && !ELUsingAArch32(EL2))) && HasS2Translation();

    r = perms.ap<1> == '1';
    w = perms.ap<2> == '1';
    if HaveExtendedExecuteNeverExt() then
        // XN[1:0] selects the Exception level(s) from which execution is forbidden.
        case perms.xn:perms.xxn of
            when '00'  xn = FALSE;
            when '01'  xn = PSTATE.EL == EL1;
            when '10'  xn = TRUE;
            when '11'  xn = PSTATE.EL == EL0;
    else
        xn = perms.xn == '1';
    // Stage 1 walk is checked as a read, regardless of the original type
    if acctype == AccType_IFETCH && !s2fs1walk then
        fail = xn;
        failedread = TRUE;
    elsif (acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW }) && !s2fs1walk then
        fail = !r || !w;
        failedread = !r;
    elsif iswrite && !s2fs1walk then
        fail = !w;
        failedread = FALSE;
    elsif acctype == AccType_DC && PSTATE.EL != EL0 && !s2fs1walk then
        // DC maintenance instructions operating by VA, with the exception of DC IVAC, do
        // not generate Permission faults from stage 2 translation, other than when
        // performing a stage 1 translation table walk.
        fail = FALSE;
    elsif hwupdatewalk then
        fail = !w;
        failedread = !iswrite;
    else
        fail = !r;
        failedread = !iswrite;

    if fail then
        domain = bits(4) UNKNOWN;
        secondstage = TRUE;
        return AArch64.PermissionFault(ipaddress,NS, level, acctype,
                                       !failedread, secondstage, s2fs1walk);
    else
        return AArch64.NoFault();
// AArch64.CheckBreakpoint()
// =========================
// Called before executing the instruction of length "size" bytes at "vaddress" in an AArch64
// translation regime, when either debug exceptions are enabled, or halting debug is enabled
// and halting is allowed.

FaultRecord AArch64.CheckBreakpoint(bits(64) vaddress, AccType acctype, integer size)
    assert !ELUsingAArch32(S1TranslationRegime());
    assert (UsingAArch32() && size IN {2,4}) || size == 4;

    match = FALSE;

    // Test every implemented breakpoint; ID_AA64DFR0_EL1.BRPs holds (number - 1).
    for i = 0 to UInt(ID_AA64DFR0_EL1.BRPs)
        match_i = AArch64.BreakpointMatch(i, vaddress, acctype, size);
        match = match || match_i;

    if match && HaltOnBreakpointOrWatchpoint() then
        // Halting debug takes precedence: enter Debug state.
        reason = DebugHalt_Breakpoint;
        Halt(reason);
    elsif match then
        acctype = AccType_IFETCH;
        iswrite = FALSE;
        return AArch64.DebugFault(acctype, iswrite);
    else
        return AArch64.NoFault();
// AArch64.CheckDebug()
// ====================
// Called on each access to check for a debug exception or entry to Debug state.

FaultRecord AArch64.CheckDebug(bits(64) vaddress, AccType acctype, boolean iswrite, integer size)

    FaultRecord fault = AArch64.NoFault();

    d_side = (acctype != AccType_IFETCH);
    if HaveNV2Ext() && acctype == AccType_NV2REGISTER then
        // NV2-redirected register accesses are checked against the EL2 regime.
        mask = '0';
        generate_exception = AArch64.GenerateDebugExceptionsFrom(EL2, IsSecure(), mask) && MDSCR_EL1.MDE == '1';
    else
        generate_exception = AArch64.GenerateDebugExceptions() && MDSCR_EL1.MDE == '1';
    halt = HaltOnBreakpointOrWatchpoint();

    if generate_exception || halt then
        if d_side then
            fault = AArch64.CheckWatchpoint(vaddress, acctype, iswrite, size);
        else
            fault = AArch64.CheckBreakpoint(vaddress, acctype, size);

    return fault;
// AArch64.CheckWatchpoint()
// =========================
// Called before accessing the memory location of "size" bytes at "address",
// when either debug exceptions are enabled for the access, or halting debug
// is enabled and halting is allowed.

FaultRecord AArch64.CheckWatchpoint(bits(64) vaddress, AccType acctype,
                                    boolean iswrite, integer size)
    assert !ELUsingAArch32(S1TranslationRegime());

    // Translation-table walks and cache/address-translation maintenance do not
    // generate watchpoint matches; DC by VA matches only on the write side.
    if acctype IN {AccType_TTW, AccType_IC, AccType_AT} then
        return AArch64.NoFault();
    if acctype == AccType_DC then
        if !iswrite then
            return AArch64.NoFault();

    match = FALSE;
    ispriv = AArch64.AccessUsesEL(acctype) != EL0;

    // Test every implemented watchpoint; ID_AA64DFR0_EL1.WRPs holds (number - 1).
    for i = 0 to UInt(ID_AA64DFR0_EL1.WRPs)
        match = match || AArch64.WatchpointMatch(i, vaddress, size, ispriv, acctype, iswrite);

    if match && HaltOnBreakpointOrWatchpoint() then
        if acctype != AccType_NONFAULT && acctype != AccType_CNOTFIRST then
            reason = DebugHalt_Watchpoint;
            EDWAR = vaddress;
            Halt(reason);
        else
            // Fault will be reported and cancelled
            return AArch64.DebugFault(acctype, iswrite);
    elsif match then
        return AArch64.DebugFault(acctype, iswrite);
    else
        return AArch64.NoFault();
// AArch64.AccessFlagFault()
// =========================
// Constructs a FaultRecord reporting an Access Flag fault at the given level.

FaultRecord AArch64.AccessFlagFault(bits(52) ipaddress,boolean NS, integer level,
                                    AccType acctype, boolean iswrite, boolean secondstage,
                                    boolean s2fs1walk)

    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_AccessFlag, ipaddress, NS, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.AddressSizeFault()
// ==========================
// Constructs a FaultRecord reporting an Address Size fault at the given level.

FaultRecord AArch64.AddressSizeFault(bits(52) ipaddress,boolean NS, integer level,
                                     AccType acctype, boolean iswrite, boolean secondstage,
                                     boolean s2fs1walk)

    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_AddressSize, ipaddress, NS, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.AlignmentFault()
// ========================
// Constructs a FaultRecord reporting an Alignment fault; no translation level
// or IPA applies, so those fields are UNKNOWN.

FaultRecord AArch64.AlignmentFault(AccType acctype, boolean iswrite, boolean secondstage)

    ipaddress = bits(52) UNKNOWN;
    level = integer UNKNOWN;
    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    s2fs1walk = boolean UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_Alignment, ipaddress, boolean UNKNOWN, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.AsynchExternalAbort()
// =============================
// Wrapper function for asynchronous external aborts

FaultRecord AArch64.AsynchExternalAbort(boolean parity, bits(2) errortype, bit extflag)

    // Parity/ECC errors report Fault_AsyncParity; all others Fault_AsyncExternal.
    faulttype = if parity then Fault_AsyncParity else Fault_AsyncExternal;
    ipaddress = bits(52) UNKNOWN;
    level = integer UNKNOWN;
    acctype = AccType_NORMAL;
    iswrite = boolean UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch64.CreateFaultRecord(faulttype, ipaddress, boolean UNKNOWN, level, acctype, iswrite, extflag,
                                     errortype, secondstage, s2fs1walk);
// AArch64.DebugFault()
// ====================
// Constructs a FaultRecord reporting a debug exception (breakpoint or watchpoint).

FaultRecord AArch64.DebugFault(AccType acctype, boolean iswrite)

    ipaddress = bits(52) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    level = integer UNKNOWN;
    extflag = bit UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch64.CreateFaultRecord(Fault_Debug, ipaddress, boolean UNKNOWN, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.ExclusiveFault()
// ========================
// Constructs a FaultRecord for a fault on a Load-Exclusive/Store-Exclusive access.

FaultRecord AArch64.ExclusiveFault(AccType acctype, boolean iswrite)

    ipaddress = bits(52) UNKNOWN;
    level = integer UNKNOWN;
    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    secondstage = boolean UNKNOWN;
    s2fs1walk = boolean UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_Exclusive, ipaddress, boolean UNKNOWN, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.NoFault()
// =================
// Constructs a FaultRecord indicating that no fault occurred (Fault_None).

FaultRecord AArch64.NoFault()

    ipaddress = bits(52) UNKNOWN;
    level = integer UNKNOWN;
    acctype = AccType_NORMAL;
    iswrite = boolean UNKNOWN;
    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch64.CreateFaultRecord(Fault_None, ipaddress, boolean UNKNOWN, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.PermissionFault()
// =========================
// Constructs a FaultRecord reporting a Permission fault at the given level.

FaultRecord AArch64.PermissionFault(bits(52) ipaddress,boolean NS, integer level,
                                    AccType acctype, boolean iswrite, boolean secondstage,
                                    boolean s2fs1walk)

    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_Permission, ipaddress, NS, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.TranslationFault()
// ==========================
// Constructs a FaultRecord reporting a Translation fault at the given level.

FaultRecord AArch64.TranslationFault(bits(52) ipaddress, boolean NS, integer level,
                                     AccType acctype, boolean iswrite, boolean secondstage,
                                     boolean s2fs1walk)

    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_Translation, ipaddress, NS, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.CheckAndUpdateDescriptor()
// ==================================
// Check and update translation table descriptor if hardware update is configured

FaultRecord AArch64.CheckAndUpdateDescriptor(DescriptorUpdate result, FaultRecord fault,
                                             boolean secondstage, bits(64) vaddress, AccType acctype,
                                             boolean iswrite, boolean s2fs1walk, boolean hwupdatewalk)

    boolean hw_update_AF = FALSE;
    boolean hw_update_AP = FALSE;
    // Check if access flag can be updated
    // Address translation instructions are permitted to update AF but not required
    if result.AF then
        if fault.statuscode == Fault_None || ConstrainUnpredictable(Unpredictable_AFUPDATE) == Constraint_TRUE then
            hw_update_AF = TRUE;

    if result.AP && fault.statuscode == Fault_None then
        write_perm_req = (iswrite || acctype IN {AccType_ATOMICRW,AccType_ORDEREDRW, AccType_ORDEREDATOMICRW }) && !s2fs1walk;
        hw_update_AP = (write_perm_req && !(acctype IN {AccType_AT, AccType_DC, AccType_DC_UNPRIV})) || hwupdatewalk;

    if hw_update_AF || hw_update_AP then
        if secondstage || !HasS2Translation() then
            descaddr2 = result.descaddr;
        else
            // The descriptor itself must be translated through stage 2 before it can be updated.
            hwupdatewalk = TRUE;
            descaddr2 = AArch64.SecondStageWalk(result.descaddr, vaddress, acctype, iswrite, 8, hwupdatewalk);
            if IsFault(descaddr2) then
                return descaddr2.fault;

        accdesc = CreateAccessDescriptor(AccType_ATOMICRW);
        desc = _Mem[descaddr2, 8, accdesc, iswrite];
        // Descriptor endianness follows SCTLR_ELx.EE for the regime in use.
        el = AArch64.AccessUsesEL(acctype);
        case el of
            when EL3
                reversedescriptors = SCTLR_EL3.EE == '1';
            when EL2
                reversedescriptors = SCTLR_EL2.EE == '1';
            otherwise
                reversedescriptors = SCTLR_EL1.EE == '1';
        if reversedescriptors then
            desc = BigEndianReverse(desc);

        if hw_update_AF then
            desc<10> = '1';                                  // Set the Access Flag
        if hw_update_AP then
            desc<7> = (if secondstage then '1' else '0');    // Update the dirty/AP state

        _Mem[descaddr2,8,accdesc] = if reversedescriptors then BigEndianReverse(desc) else desc;

    return fault;
// AArch64.FirstStageTranslate()
// =============================
// Perform a stage 1 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.

AddressDescriptor AArch64.FirstStageTranslate(bits(64) vaddress, AccType acctype, boolean iswrite,
                                              boolean wasaligned, integer size)

    if HaveNV2Ext() && acctype == AccType_NV2REGISTER then
        s1_enabled = SCTLR_EL2.M == '1';
    elsif HasS2Translation() then
        s1_enabled = HCR_EL2.TGE == '0' && HCR_EL2.DC == '0' && SCTLR_EL1.M == '1';
    else
        s1_enabled = SCTLR[].M == '1';

    TLBRecord S1;
    S1.addrdesc.fault = AArch64.NoFault();
    ipaddress = bits(52) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;

    if s1_enabled then                         // First stage enabled
        S1 = AArch64.TranslationTableWalk(ipaddress, TRUE, vaddress, acctype, iswrite, secondstage,
                                          s2fs1walk, size);
        permissioncheck = TRUE;
        if acctype == AccType_IFETCH then
            InGuardedPage = S1.GP == '1';      // Global state updated on instruction fetch that denotes
                                               // if the fetched instruction is from a guarded page.
    else
        S1 = AArch64.TranslateAddressS1Off(vaddress, acctype, iswrite);
        permissioncheck = FALSE;
        InGuardedPage = FALSE;  // No memory is guarded when stage 1 address translation is disabled

    if !IsFault(S1.addrdesc) && UsingAArch32() && HaveTrapLoadStoreMultipleDeviceExt() && AArch32.ExecutingLSMInstr() then
        if S1.addrdesc.memattrs.memtype == MemType_Device && S1.addrdesc.memattrs.device != DeviceType_GRE then
            nTLSMD = if S1TranslationRegime() == EL2 then SCTLR_EL2.nTLSMD else SCTLR_EL1.nTLSMD;
            if nTLSMD == '0' then
                S1.addrdesc.fault = AArch64.AlignmentFault(acctype, iswrite, secondstage);

    // Check for unaligned data accesses to Device memory
    if (((!wasaligned && acctype != AccType_IFETCH) || acctype == AccType_DCZVA) &&
        !IsFault(S1.addrdesc) && S1.addrdesc.memattrs.memtype == MemType_Device) then
        S1.addrdesc.fault = AArch64.AlignmentFault(acctype, iswrite, secondstage);
    if !IsFault(S1.addrdesc) && permissioncheck then
        S1.addrdesc.fault = AArch64.CheckPermission(S1.perms, vaddress, S1.level,
                                                    S1.addrdesc.paddress.NS,
                                                    acctype,
                                                    iswrite);

    // Check for instruction fetches from Device memory not marked as execute-never. If there has
    // not been a Permission Fault then the memory is not marked execute-never.
    if (!IsFault(S1.addrdesc) && S1.addrdesc.memattrs.memtype == MemType_Device &&
        acctype == AccType_IFETCH) then
        S1.addrdesc = AArch64.InstructionDevice(S1.addrdesc, vaddress, ipaddress, S1.level,
                                                acctype, iswrite,
                                                secondstage, s2fs1walk);

    // Check and update translation table descriptor if required
    hwupdatewalk = FALSE;
    s2fs1walk = FALSE;
    S1.addrdesc.fault = AArch64.CheckAndUpdateDescriptor(S1.descupdate, S1.addrdesc.fault,
                                                         secondstage, vaddress, acctype,
                                                         iswrite, s2fs1walk, hwupdatewalk);

    return S1.addrdesc;
// AArch64.FullTranslate()
// =======================
// Perform both stage 1 and stage 2 translation walks for the current translation regime. The
// function used by Address Translation operations is similar except it uses the translation
// regime specified for the instruction.

AddressDescriptor AArch64.FullTranslate(bits(64) vaddress, AccType acctype, boolean iswrite,
                                        boolean wasaligned, integer size)

    // First Stage Translation
    S1 = AArch64.FirstStageTranslate(vaddress, acctype, iswrite, wasaligned, size);
    if !IsFault(S1) && !(HaveNV2Ext() && acctype == AccType_NV2REGISTER) && HasS2Translation() then
        s2fs1walk = FALSE;
        hwupdatewalk = FALSE;
        result = AArch64.SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
                                              size, hwupdatewalk);
    else
        result = S1;

    return result;
// AArch64.SecondStageTranslate()
// ==============================
// Perform a stage 2 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.

AddressDescriptor AArch64.SecondStageTranslate(AddressDescriptor S1, bits(64) vaddress,
                                               AccType acctype, boolean iswrite, boolean wasaligned,
                                               boolean s2fs1walk, integer size, boolean hwupdatewalk)
    assert HasS2Translation();

    s2_enabled = HCR_EL2.VM == '1' || HCR_EL2.DC == '1';
    secondstage = TRUE;

    if s2_enabled then                         // Second stage enabled
        ipaddress = S1.paddress.address<51:0>;
        NS = S1.paddress.NS == '1';
        S2 = AArch64.TranslationTableWalk(ipaddress, NS, vaddress, acctype, iswrite, secondstage,
                                          s2fs1walk, size);

        // Check for unaligned data accesses to Device memory
        if (((!wasaligned && acctype != AccType_IFETCH) ||
            (acctype == AccType_DCZVA && !s2fs1walk)) &&
            S2.addrdesc.memattrs.memtype == MemType_Device && !IsFault(S2.addrdesc)) then
            S2.addrdesc.fault = AArch64.AlignmentFault(acctype, iswrite, secondstage);
        // Check for permissions on Stage2 translations
        if !IsFault(S2.addrdesc) then
            S2.addrdesc.fault = AArch64.CheckS2Permission(S2.perms, vaddress, ipaddress, S2.level,
                                                          acctype, iswrite, NS,s2fs1walk, hwupdatewalk);

        // Check for instruction fetches from Device memory not marked as execute-never. As there
        // has not been a Permission Fault then the memory is not marked execute-never.
        if (!s2fs1walk && !IsFault(S2.addrdesc) && S2.addrdesc.memattrs.memtype == MemType_Device &&
            acctype == AccType_IFETCH) then
            S2.addrdesc = AArch64.InstructionDevice(S2.addrdesc, vaddress, ipaddress, S2.level,
                                                    acctype, iswrite,
                                                    secondstage, s2fs1walk);

        if (s2fs1walk && !IsFault(S2.addrdesc) &&
            S2.addrdesc.memattrs.memtype == MemType_Device) then
            // Check for protected table walk.
            if HCR_EL2.PTW == '1' then
                S2.addrdesc.fault = AArch64.PermissionFault(ipaddress,
                                                            NS, S2.level,
                                                            acctype, iswrite, secondstage, s2fs1walk);
            else
                // Translation table walk occurs as Normal Non-cacheable memory.
                S2.addrdesc.memattrs.memtype = MemType_Normal;
                S2.addrdesc.memattrs.inner.attrs = MemAttr_NC;
                S2.addrdesc.memattrs.outer.attrs = MemAttr_NC;
                S2.addrdesc.memattrs.shareable = TRUE;
                S2.addrdesc.memattrs.outershareable = TRUE;

        // Check and update translation table descriptor if required
        S2.addrdesc.fault = AArch64.CheckAndUpdateDescriptor(S2.descupdate, S2.addrdesc.fault,
                                                             secondstage, vaddress, acctype,
                                                             iswrite, s2fs1walk, hwupdatewalk);

        if s2fs1walk then
            result = AArch64.CombineS1S2Desc(S1, S2.addrdesc, AccType_TTW);
        else
            result = AArch64.CombineS1S2Desc(S1, S2.addrdesc, acctype);
    else
        result = S1;

    return result;
// AArch64.SecondStageWalk()
// =========================
// Perform a stage 2 translation on a stage 1 translation table walk access.

AddressDescriptor AArch64.SecondStageWalk(AddressDescriptor S1, bits(64) vaddress, AccType acctype,
                                          boolean iswrite, integer size, boolean hwupdatewalk)
    assert HasS2Translation();

    s2fs1walk = TRUE;
    wasaligned = TRUE;
    return AArch64.SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
                                        size, hwupdatewalk);
// AArch64.TranslateAddress()
// ==========================
// Main entry point for translating an address

AddressDescriptor AArch64.TranslateAddress(bits(64) vaddress, AccType acctype, boolean iswrite,
                                           boolean wasaligned, integer size)
    result = AArch64.FullTranslate(vaddress, acctype, iswrite, wasaligned, size);

    if !IsFault(result) then
        result.fault = AArch64.CheckDebug(vaddress, acctype, iswrite, size);

    // Update virtual address for abort functions
    result.vaddress = ZeroExtend(vaddress);

    return result;
// AArch64.TranslateAddress()
// ==========================
// Main entry point for translating an address
// AArch64.TranslationTableWalk()
// ==============================
// Returns a result of a translation table walk
//
// Implementations might cache information from memory in any number of non-coherent TLB
// caching structures, and so avoid memory accesses that have been expressed in this
// pseudocode. The use of such TLBs is not expressed in this pseudocode.
AddressDescriptorTLBRecord AArch64.TranslateAddress(bits(64) vaddress,AArch64.TranslationTableWalk(bits(52) ipaddress, boolean s1_nonsecure, bits(64) vaddress, AccType acctype, boolean iswrite,
boolean wasaligned, integer size)
result =acctype, boolean iswrite, boolean secondstage,
boolean s2fs1walk, integer size)
if !secondstage then
assert ! AArch64.FullTranslateELUsingAArch32(vaddress, acctype, iswrite, wasaligned, size);
if !(S1TranslationRegime());
else
assert (IsSecureEL2Enabled() || (HaveEL(EL2) && !IsSecure() && !ELUsingAArch32(EL2))) && HasS2Translation();
TLBRecord result;
AddressDescriptor descaddr;
bits(64) baseregister;
bits(64) inputaddr; // Input Address is 'vaddress' for stage 1, 'ipaddress' for stage 2
bit nswalk; // Stage 2 translation table walks are to Secure or to Non-secure PA space
result.descupdate.AF = FALSE;
result.descupdate.AP = FALSE;
descaddr.memattrs.memtype = MemType_Normal;
// Derived parameters for the translation table walk:
// grainsize = Log2(Size of Table) - Size of Table is 4KB, 16KB or 64KB in AArch64
// stride = Log2(Address per Level) - Bits of address consumed at each level
// firstblocklevel = First level where a block entry is allowed
// ps = Physical Address size as encoded in TCR_EL1.IPS or TCR_ELx/VTCR_EL2.PS
// inputsize = Log2(Size of Input Address) - Input Address size in bits
// level = Level to start walk from
// This means that the number of levels after start level = 3-level
if !secondstage then
// First stage translation
inputaddr = ZeroExtend(vaddress);
el = AArch64.AccessUsesEL(acctype);
isprivileged = AArch64.AccessUsesEL(acctype) != EL0;
top = AddrTop(inputaddr, (acctype == AccType_IFETCH), el);
if el == EL3 then
largegrain = TCR_EL3.TG0 == '01';
midgrain = TCR_EL3.TG0 == '10';
inputsize = 64 - UInt(TCR_EL3.T0SZ);
inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
if !Have52BitVAExt() && inputsize > inputsize_max then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_max;
inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
ps = TCR_EL3.PS;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>);
disabled = FALSE;
baseregister = TTBR0_EL3;
descaddr.memattrs = WalkAttrDecode(TCR_EL3.SH0, TCR_EL3.ORGN0, TCR_EL3.IRGN0, secondstage);
reversedescriptors = SCTLR_EL3.EE == '1';
lookupsecure = TRUE;
singlepriv = TRUE;
update_AF = HaveAccessFlagUpdateExt() && TCR_EL3.HA == '1';
update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL3.HD == '1';
hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL3.HPD == '1';
elsif ELIsInHost(el) then
if inputaddr<top> == '0' then
largegrain = TCR_EL2.TG0 == '01';
midgrain = TCR_EL2.TG0 == '10';
inputsize = 64 - UInt(TCR_EL2.T0SZ);
inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
if !Have52BitVAExt() && inputsize > inputsize_max then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_max;
inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>);
disabled = TCR_EL2.EPD0 == '1' || (!isprivileged && HaveE0PDExt() && TCR_EL2.E0PD0 == '1');
if el == EL0 && TCR_EL2.NFD0 == '1' then
disabled = disabled || acctype == AccType_NONFAULT;
baseregister = TTBR0_EL2;
descaddr.memattrs = WalkAttrDecode(TCR_EL2.SH0, TCR_EL2.ORGN0, TCR_EL2.IRGN0, secondstage);
hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL2.HPD0 == '1';
else
inputsize = 64 - UInt(TCR_EL2.T1SZ);
largegrain = TCR_EL2.TG1 == '11'; // TG1 and TG0 encodings differ
midgrain = TCR_EL2.TG1 == '01';
inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
if !Have52BitVAExt() && inputsize > inputsize_max then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_max;
inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsOnes(inputaddr<top:inputsize>);
disabled = TCR_EL2.EPD1 == '1' || (!isprivileged && HaveE0PDExt() && TCR_EL2.E0PD1 == '1');
if el == EL0 && TCR_EL2.NFD1 == '1' then
disabled = disabled || acctype == AccType_NONFAULT;
baseregister = TTBR1_EL2;
descaddr.memattrs = WalkAttrDecode(TCR_EL2.SH1, TCR_EL2.ORGN1, TCR_EL2.IRGN1, secondstage);
hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL2.HPD1 == '1';
ps = TCR_EL2.IPS;
reversedescriptors = SCTLR_EL2.EE == '1';
lookupsecure = if IsSecureEL2Enabled() then IsSecure() else FALSE;
singlepriv = FALSE;
update_AF = HaveAccessFlagUpdateExt() && TCR_EL2.HA == '1';
update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL2.HD == '1';
elsif el == EL2 then
inputsize = 64 - UInt(TCR_EL2.T0SZ);
largegrain = TCR_EL2.TG0 == '01';
midgrain = TCR_EL2.TG0 == '10';
inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
if !Have52BitVAExt() && inputsize > inputsize_max then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_max;
inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
ps = TCR_EL2.PS;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>);
disabled = FALSE;
baseregister = TTBR0_EL2;
descaddr.memattrs = WalkAttrDecode(TCR_EL2.SH0, TCR_EL2.ORGN0, TCR_EL2.IRGN0, secondstage);
reversedescriptors = SCTLR_EL2.EE == '1';
lookupsecure = if IsSecureEL2Enabled() then IsSecure() else FALSE;
singlepriv = TRUE;
update_AF = HaveAccessFlagUpdateExt() && TCR_EL2.HA == '1';
update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL2.HD == '1';
hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL2.HPD == '1';
else
if inputaddr<top> == '0' then
inputsize = 64 - UInt(TCR_EL1.T0SZ);
largegrain = TCR_EL1.TG0 == '01';
midgrain = TCR_EL1.TG0 == '10';
inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
if !Have52BitVAExt() && inputsize > inputsize_max then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_max;
inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>);
disabled = TCR_EL1.EPD0 == '1' || (!isprivileged && HaveE0PDExt() && TCR_EL1.E0PD0 == '1');
if el == EL0 && TCR_EL1.NFD0 == '1' then
disabled = disabled || acctype == AccType_NONFAULT;
baseregister = TTBR0_EL1;
descaddr.memattrs = WalkAttrDecode(TCR_EL1.SH0, TCR_EL1.ORGN0, TCR_EL1.IRGN0, secondstage);
hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL1.HPD0 == '1';
else
inputsize = 64 - UInt(TCR_EL1.T1SZ);
largegrain = TCR_EL1.TG1 == '11'; // TG1 and TG0 encodings differ
midgrain = TCR_EL1.TG1 == '01';
inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
if !Have52BitVAExt() && inputsize > inputsize_max then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_max;
inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsOnes(inputaddr<top:inputsize>);
disabled = TCR_EL1.EPD1 == '1' || (!isprivileged && HaveE0PDExt() && TCR_EL1.E0PD1 == '1');
if el == EL0 && TCR_EL1.NFD1 == '1' then
disabled = disabled || acctype == AccType_NONFAULT;
baseregister = TTBR1_EL1;
descaddr.memattrs = WalkAttrDecode(TCR_EL1.SH1, TCR_EL1.ORGN1, TCR_EL1.IRGN1, secondstage);
hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL1.HPD1 == '1';
ps = TCR_EL1.IPS;
reversedescriptors = SCTLR_EL1.EE == '1';
lookupsecure = IsSecure();
singlepriv = FALSE;
update_AF = HaveAccessFlagUpdateExt() && TCR_EL1.HA == '1';
update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL1.HD == '1';
if largegrain then
grainsize = 16; // Log2(64KB page size)
firstblocklevel = (if Have52BitPAExt() then 1 else 2); // Largest block is 4TB (2^42 bytes) for 52 bit PA
// and 512MB (2^29 bytes) otherwise
elsif midgrain then
grainsize = 14; // Log2(16KB page size)
firstblocklevel = 2; // Largest block is 32MB (2^25 bytes)
else // Small grain
grainsize = 12; // Log2(4KB page size)
firstblocklevel = 1; // Largest block is 1GB (2^30 bytes)
stride = grainsize - 3; // Log2(page size / 8 bytes)
// The starting level is the number of strides needed to consume the input address
level = 4 - (1 + ((inputsize - grainsize - 1) DIV stride));
else
// Second stage translation
inputaddr = ZeroExtend(ipaddress);
if IsSecureBelowEL3() then
// Second stage for Secure translation regime
if s1_nonsecure then // Non-secure IPA space
t0size = VTCR_EL2.T0SZ;
tg0 = VTCR_EL2.TG0;
nswalk = VTCR_EL2.NSW;
else // Secure IPA space
t0size = VSTCR_EL2.T0SZ;
tg0 = VSTCR_EL2.TG0;
nswalk = VSTCR_EL2.SW;
// Stage 2 translation accesses the Non-secure PA space or the Secure PA space
if nswalk == '1' then
// When walk is Non-secure, access must be to the Non-secure PA space
nsaccess = '1';
elsif !s1_nonsecure then
// When walk is Secure and in the Secure IPA space,
// access is specified by VSTCR_EL2.SA
nsaccess = VSTCR_EL2.SA;
elsif VSTCR_EL2.SW == '1' || VSTCR_EL2.SA == '1' then
// When walk is Secure and in the Non-secure IPA space,
// access is Non-secure when VSTCR_EL2.SA specifies the Non-secure PA space
nsaccess = '1';
else
// When walk is Secure and in the Non-secure IPA space,
// if VSTCR_EL2.SA specifies the Secure PA space, access is specified by VTCR_EL2.NSA
nsaccess = VTCR_EL2.NSA;
else
// Second stage for Non-secure translation regime
t0size = VTCR_EL2.T0SZ;
tg0 = VTCR_EL2.TG0;
nswalk = '1';
nsaccess = '1';
inputsize = 64 - UInt(t0size);
largegrain = tg0 == '01';
midgrain = tg0 == '10';
inputsize_max = if Have52BitPAExt() && PAMax() == 52 && largegrain then 52 else 48;
if !Have52BitPAExt() && inputsize > inputsize_max then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_max;
inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
ps = VTCR_EL2.PS;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<63:inputsize>);
disabled = FALSE;
descaddr.memattrs = WalkAttrDecode(VTCR_EL2.SH0, VTCR_EL2.ORGN0, VTCR_EL2.IRGN0, secondstage);
reversedescriptors = SCTLR_EL2.EE == '1';
singlepriv = TRUE;
update_AF = HaveAccessFlagUpdateExt() && VTCR_EL2.HA == '1';
update_AP = HaveDirtyBitModifierExt() && update_AF && VTCR_EL2.HD == '1';
if IsSecureEL2Enabled() then
lookupsecure = !s1_nonsecure;
else
lookupsecure = FALSE;
if lookupsecure then
baseregister = VSTTBR_EL2;
startlevel = UInt(VSTCR_EL2.SL0);
else
baseregister = VTTBR_EL2;
startlevel = UInt(VTCR_EL2.SL0);
if largegrain then
grainsize = 16; // Log2(64KB page size)
level = 3 - startlevel;
firstblocklevel = (if Have52BitPAExt() then 1 else 2); // Largest block is 4TB (2^42 bytes) for 52 bit PA
// and 512MB (2^29 bytes) otherwise
elsif midgrain then
grainsize = 14; // Log2(16KB page size)
level = 3 - startlevel;
firstblocklevel = 2; // Largest block is 32MB (2^25 bytes)
else // Small grain
grainsize = 12; // Log2(4KB page size)
if HaveSmallTranslationTableExt() && startlevel == 3 then
level = startlevel; // Startlevel 3 (VTCR_EL2.SL0 or VSTCR_EL2.SL0 == 0b11) for 4KB granule
else
level = 2 - startlevel;
firstblocklevel = 1; // Largest block is 1GB (2^30 bytes)
stride = grainsize - 3; // Log2(page size / 8 bytes)
// Limits on IPA controls based on implemented PA size. Level 0 is only
// supported by small grain translations
if largegrain then // 64KB pages
// Level 1 only supported if implemented PA size is greater than 2^42 bytes
if level == 0 || (level == 1 && PAMax() <= 42) then basefound = FALSE;
elsif midgrain then // 16KB pages
// Level 1 only supported if implemented PA size is greater than 2^40 bytes
if level == 0 || (level == 1 && PAMax() <= 40) then basefound = FALSE;
else // Small grain, 4KB pages
// Level 0 only supported if implemented PA size is greater than 2^42 bytes
if level < 0 || (level == 0 && PAMax() <= 42) then basefound = FALSE;
// If the inputsize exceeds the PAMax value, the behavior is CONSTRAINED UNPREDICTABLE
inputsizecheck = inputsize;
if inputsize > PAMax() && (!ELUsingAArch32(EL1) || inputsize > 40) then
case ConstrainUnpredictable(Unpredictable_LARGEIPA) of
when Constraint_FORCE
// Restrict the inputsize to the PAMax value
inputsize = PAMax();
inputsizecheck = PAMax();
when Constraint_FORCENOSLCHECK
// As FORCE, except use the configured inputsize in the size checks below
inputsize = PAMax();
when Constraint_FAULT
// Generate a translation fault
basefound = FALSE;
otherwise
Unreachable();
// Number of entries in the starting level table =
// (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
startsizecheck = inputsizecheck - ((3 - level)*stride + grainsize); // Log2(Num of entries)
// Check for starting level table with fewer than 2 entries or longer than 16 pages.
// Lower bound check is: startsizecheck < Log2(2 entries)
// Upper bound check is: startsizecheck > Log2(pagesize/8*16)
if startsizecheck < 1 || startsizecheck > stride + 4 then basefound = FALSE;
if !basefound || disabled then
level = 0; // AArch32 reports this as a level 1 fault
result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype, iswrite,
secondstage, s2fs1walk);
return result;
case ps of
when '000' outputsize = 32;
when '001' outputsize = 36;
when '010' outputsize = 40;
when '011' outputsize = 42;
when '100' outputsize = 44;
when '101' outputsize = 48;
when '110' outputsize = (if Have52BitPAExt() && largegrain then 52 else 48);
otherwise outputsize = integer IMPLEMENTATION_DEFINED "Reserved Intermediate Physical Address size value";
if outputsize > PAMax() then outputsize = PAMax();
if outputsize < 48 && !IsZero(baseregister<47:outputsize>) then
level = 0;
result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress,s1_nonsecure, level, acctype, iswrite,
secondstage, s2fs1walk);
return result;
// Bottom bound of the Base address is:
// Log2(8 bytes per entry)+Log2(Number of entries in starting level table)
// Number of entries in starting level table =
// (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
baselowerbound = 3 + inputsize - ((3-level)*stride + grainsize); // Log2(Num of entries*8)
if outputsize == 52 then
z = (if baselowerbound < 6 then 6 else baselowerbound);
baseaddress = baseregister<5:2>:baseregister<47:z>:Zeros(z);
else
baseaddress = ZeroExtend(baseregister<47:baselowerbound>:Zeros(baselowerbound));
ns_table = if lookupsecure then '0' else '1';
ap_table = '00';
xn_table = '0';
pxn_table = '0';
addrselecttop = inputsize - 1;
apply_nvnv1_effect = HaveNVExt() && EL2Enabled() && HCR_EL2.<NV,NV1> == '11' && S1TranslationRegime() == EL1 && !secondstage;
repeat
addrselectbottom = (3-level)*stride + grainsize;
bits(52) index = ZeroExtend(inputaddr<addrselecttop:addrselectbottom>:'000');
descaddr.paddress.address = baseaddress OR index;
descaddr.paddress.NS = if secondstage then nswalk else ns_table;
// If there are two stages of translation, then the first stage table walk addresses
// are themselves subject to translation
if secondstage || !HasS2Translation() || (HaveNV2Ext() && acctype == AccType_NV2REGISTER) then
descaddr2 = descaddr;
else
hwupdatewalk = FALSE;
descaddr2 = AArch64.SecondStageWalk(descaddr, vaddress, acctype, iswrite, 8, hwupdatewalk);
// Check for a fault on the stage 2 walk
if IsFault(result) then
result.fault =(descaddr2) then
result.addrdesc.fault = descaddr2.fault;
return result;
// Update virtual address for abort functions
descaddr2.vaddress = AArch64.CheckDebugZeroExtend(vaddress, acctype, iswrite, size);
(vaddress);
// Update virtual address for abort functions
result.vaddress = accdesc = CreateAccessDescriptorTTW(acctype, secondstage, s2fs1walk, level);
desc = _Mem[descaddr2, 8, accdesc, iswrite];
if reversedescriptors then desc = BigEndianReverse(desc);
if desc<0> == '0' || (desc<1:0> == '01' && (level == 3 ||
(HaveBlockBBM() && IsBlockDescriptorNTBitValid() && desc<16> == '1'))) then
// Fault (00), Reserved (10), Block (01) at level 3, or Block(01) with nT bit set.
result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
// Valid Block, Page, or Table entry
if desc<1:0> == '01' || level == 3 then // Block (01) or Page (11)
blocktranslate = TRUE;
else // Table (11)
if (outputsize < 52 && largegrain && (PAMax() == 52 ||
boolean IMPLEMENTATION_DEFINED "Address Size Fault on LPA descriptor bits [15:12]") &&
!IsZero(desc<15:12>)) || (outputsize < 48 && !IsZero(desc<47:outputsize>)) then
result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress,s1_nonsecure, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
if outputsize == 52 then
baseaddress = desc<15:12>:desc<47:grainsize>:Zeros(grainsize);
else
baseaddress = ZeroExtend(desc<47:grainsize>:Zeros(grainsize));
if !secondstage then
// Unpack the upper and lower table attributes
ns_table = ns_table OR desc<63>;
if !secondstage && !hierattrsdisabled then
ap_table<1> = ap_table<1> OR desc<62>; // read-only
if apply_nvnv1_effect then
pxn_table = pxn_table OR desc<60>;
else
xn_table = xn_table OR desc<60>;
// pxn_table and ap_table[0] apply in EL1&0 or EL2&0 translation regimes
if !singlepriv then
if !apply_nvnv1_effect then
pxn_table = pxn_table OR desc<59>;
ap_table<0> = ap_table<0> OR desc<61>; // privileged
level = level + 1;
addrselecttop = addrselectbottom - 1;
blocktranslate = FALSE;
until blocktranslate;
// Check block size is supported at this level
if level < firstblocklevel then
result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
// Check for misprogramming of the contiguous bit
if largegrain then
num_ch_entries = 5;
elsif midgrain then
num_ch_entries = if level == 3 then 7 else 5;
else
num_ch_entries = 4;
contiguousbitcheck = inputsize < (addrselectbottom + num_ch_entries);
if contiguousbitcheck && desc<52> == '1' then
if boolean IMPLEMENTATION_DEFINED "Translation fault on misprogrammed contiguous bit" then
result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
// Unpack the descriptor into address and upper and lower block attributes
if largegrain then
outputaddress = desc<15:12>:desc<47:addrselectbottom>:inputaddr<addrselectbottom-1:0>;
else
outputaddress = ZeroExtend(desc<47:addrselectbottom>:inputaddr<addrselectbottom-1:0>);
// When 52-bit PA is supported, for 64 Kbyte translation granule,
// block size might be larger than the supported output address size
if ((outputsize < 52 && !IsZero(outputaddress<51:48>) && largegrain && (PAMax() == 52 ||
boolean IMPLEMENTATION_DEFINED "Address Size Fault on LPA descriptor bits [15:12]")) ||
(outputsize < 48 && !IsZero(outputaddress<47:outputsize>))) then
result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress,s1_nonsecure, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
// Check Access Flag
if desc<10> == '0' then
if !update_AF then
result.addrdesc.fault = AArch64.AccessFlagFault(ipaddress,s1_nonsecure, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
else
result.descupdate.AF = TRUE;
if update_AP && desc<51> == '1' then
// If hw update of access permission field is configured consider AP[2] as '0' / S2AP[2] as '1'
if !secondstage && desc<7> == '1' then
desc<7> = '0';
result.descupdate.AP = TRUE;
elsif secondstage && desc<7> == '0' then
desc<7> = '1';
result.descupdate.AP = TRUE;
// Required descriptor if AF or AP[2]/S2AP[2] needs update
result.descupdate.descaddr = descaddr;
if apply_nvnv1_effect then
pxn = desc<54>; // Bit[54] of the block/page descriptor holds PXN instead of UXN
xn = '0'; // XN is '0'
ap = desc<7>:'01'; // Bit[6] of the block/page descriptor is treated as '0' regardless of value programmed
else
xn = desc<54>; // Bit[54] of the block/page descriptor holds UXN
pxn = desc<53>; // Bit[53] of the block/page descriptor holds PXN
ap = desc<7:6>:'1'; // Bits[7:6] of the block/page descriptor hold AP[2:1]
contiguousbit = desc<52>;
nG = desc<11>;
sh = desc<9:8>;
memattr = desc<5:2>; // AttrIndx and NS bit in stage 1
result.domain = bits(4) UNKNOWN; // Domains not used
result.level = level;
result.blocksize = 2^((3-level)*stride + grainsize);
// Stage 1 translation regimes also inherit attributes from the tables
if !secondstage then
result.perms.xn = xn OR xn_table;
result.perms.ap<2> = ap<2> OR ap_table<1>; // Force read-only
// PXN, nG and AP[1] apply in EL1&0 or EL2&0 stage 1 translation regimes
if !singlepriv then
result.perms.ap<1> = ap<1> AND NOT(ap_table<0>); // Force privileged only
result.perms.pxn = pxn OR pxn_table;
// Pages from Non-secure tables are marked non-global in Secure EL1&0
if IsSecure() then
result.nG = nG OR ns_table;
else
result.nG = nG;
else
result.perms.ap<1> = '1';
result.perms.pxn = '0';
result.nG = '0';
result.GP = desc<50>; // Stage 1 block or pages might be guarded
result.perms.ap<0> = '1';
result.addrdesc.memattrs = AArch64.S1AttrDecode(sh, memattr<2:0>, acctype);
result.addrdesc.paddress.NS = memattr<3> OR ns_table;
else
result.perms.ap<2:1> = ap<2:1>;
result.perms.ap<0> = '1';
result.perms.xn = xn;
if HaveExtendedExecuteNeverExt() then result.perms.xxn = desc<53>;
result.perms.pxn = '0';
result.nG = '0';
if s2fs1walk then
result.addrdesc.memattrs = S2AttrDecode(sh, memattr, AccType_TTW);
else
result.addrdesc.memattrs = S2AttrDecode(sh, memattr, acctype);
result.addrdesc.paddress.NS = nsaccess;
result.addrdesc.paddress.address = outputaddress;
result.addrdesc.fault = AArch64.NoFault();
result.contiguous = contiguousbit == '1';
if HaveCommonNotPrivateTransExt(vaddress);
() then result.CnP = baseregister<0>;
return result;
// AArch64.TranslationTableWalk()
// ==============================
// Returns a result of a translation table walk
//
// Implementations might cache information from memory in any number of non-coherent TLB
// caching structures, and so avoid memory accesses that have been expressed in this
// pseudocode. The use of such TLBs is not expressed in this pseudocode.

TLBRecord AArch64.TranslationTableWalk(bits(52) ipaddress, boolean s1_nonsecure, bits(64) vaddress,
                                       AccType acctype, boolean iswrite, boolean secondstage,
                                       boolean s2fs1walk, integer size)
    if !secondstage then
        assert !ELUsingAArch32(S1TranslationRegime());
    else
        assert (IsSecureEL2Enabled() || (HaveEL(EL2) && !IsSecure() && !ELUsingAArch32(EL2))) && HasS2Translation();

    TLBRecord result;
    AddressDescriptor descaddr;
    bits(64) baseregister;
    bits(64) inputaddr;    // Input Address is 'vaddress' for stage 1, 'ipaddress' for stage 2
    bit nswalk;            // Stage 2 translation table walks are to Secure or to Non-secure PA space

    result.descupdate.AF = FALSE;
    result.descupdate.AP = FALSE;

    descaddr.memattrs.memtype = MemType_Normal;

    // Derived parameters for the translation table walk:
    //  grainsize = Log2(Size of Table)         - Size of Table is 4KB, 16KB or 64KB in AArch64
    //  stride = Log2(Address per Level)        - Bits of address consumed at each level
    //  firstblocklevel = First level where a block entry is allowed
    //  ps = Physical Address size as encoded in TCR_EL1.IPS or TCR_ELx/VTCR_EL2.PS
    //  inputsize = Log2(Size of Input Address) - Input Address size in bits
    //  level = Level to start walk from
    // This means that the number of levels after start level = 3-level

    if !secondstage then
        // First stage translation
        inputaddr = ZeroExtend(vaddress);
        el = AArch64.AccessUsesEL(acctype);
        isprivileged = AArch64.AccessUsesEL(acctype) != EL0;
        top = AddrTop(inputaddr, (acctype == AccType_IFETCH), el);
        if el == EL3 then
            largegrain = TCR_EL3.TG0 == '01';
            midgrain = TCR_EL3.TG0 == '10';
            inputsize = 64 - UInt(TCR_EL3.T0SZ);
            inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
            if !Have52BitVAExt() && inputsize > inputsize_max then
                c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
                assert c IN {Constraint_FORCE, Constraint_FAULT};
                if c == Constraint_FORCE then inputsize = inputsize_max;
            inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
            if inputsize < inputsize_min then
                c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
                assert c IN {Constraint_FORCE, Constraint_FAULT};
                if c == Constraint_FORCE then inputsize = inputsize_min;
            ps = TCR_EL3.PS;
            basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>);
            disabled = FALSE;
            baseregister = TTBR0_EL3;
            descaddr.memattrs = WalkAttrDecode(TCR_EL3.SH0, TCR_EL3.ORGN0, TCR_EL3.IRGN0, secondstage);
            reversedescriptors = SCTLR_EL3.EE == '1';
            lookupsecure = TRUE;
            singlepriv = TRUE;
            update_AF = HaveAccessFlagUpdateExt() && TCR_EL3.HA == '1';
            update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL3.HD == '1';
            hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL3.HPD == '1';
        elsif ELIsInHost(el) then
            if inputaddr<top> == '0' then
                largegrain = TCR_EL2.TG0 == '01';
                midgrain = TCR_EL2.TG0 == '10';
                inputsize = 64 - UInt(TCR_EL2.T0SZ);
                inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
                if !Have52BitVAExt() && inputsize > inputsize_max then
                    c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
                    assert c IN {Constraint_FORCE, Constraint_FAULT};
                    if c == Constraint_FORCE then inputsize = inputsize_max;
                inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
                if inputsize < inputsize_min then
                    c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
                    assert c IN {Constraint_FORCE, Constraint_FAULT};
                    if c == Constraint_FORCE then inputsize = inputsize_min;
                basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>);
                disabled = TCR_EL2.EPD0 == '1' || (!isprivileged && HaveE0PDExt() && TCR_EL2.E0PD0 == '1');
                if el == EL0 && TCR_EL2.NFD0 == '1' then
                    disabled = disabled || acctype == AccType_NONFAULT;
                baseregister = TTBR0_EL2;
                descaddr.memattrs = WalkAttrDecode(TCR_EL2.SH0, TCR_EL2.ORGN0, TCR_EL2.IRGN0, secondstage);
                hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL2.HPD0 == '1';
            else
                inputsize = 64 - UInt(TCR_EL2.T1SZ);
                largegrain = TCR_EL2.TG1 == '11';    // TG1 and TG0 encodings differ
                midgrain = TCR_EL2.TG1 == '01';
                inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
                if !Have52BitVAExt() && inputsize > inputsize_max then
                    c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
                    assert c IN {Constraint_FORCE, Constraint_FAULT};
                    if c == Constraint_FORCE then inputsize = inputsize_max;
                inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
                if inputsize < inputsize_min then
                    c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
                    assert c IN {Constraint_FORCE, Constraint_FAULT};
                    if c == Constraint_FORCE then inputsize = inputsize_min;
                basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsOnes(inputaddr<top:inputsize>);
                disabled = TCR_EL2.EPD1 == '1' || (!isprivileged && HaveE0PDExt() && TCR_EL2.E0PD1 == '1');
                if el == EL0 && TCR_EL2.NFD1 == '1' then
                    disabled = disabled || acctype == AccType_NONFAULT;
                baseregister = TTBR1_EL2;
                descaddr.memattrs = WalkAttrDecode(TCR_EL2.SH1, TCR_EL2.ORGN1, TCR_EL2.IRGN1, secondstage);
                hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL2.HPD1 == '1';
            ps = TCR_EL2.IPS;
            reversedescriptors = SCTLR_EL2.EE == '1';
            lookupsecure = if IsSecureEL2Enabled() then IsSecure() else FALSE;
            singlepriv = FALSE;
            update_AF = HaveAccessFlagUpdateExt() && TCR_EL2.HA == '1';
            update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL2.HD == '1';
        elsif el == EL2 then
            inputsize = 64 - UInt(TCR_EL2.T0SZ);
            largegrain = TCR_EL2.TG0 == '01';
            midgrain = TCR_EL2.TG0 == '10';
            inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
            if !Have52BitVAExt() && inputsize > inputsize_max then
                c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
                assert c IN {Constraint_FORCE, Constraint_FAULT};
                if c == Constraint_FORCE then inputsize = inputsize_max;
            inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
            if inputsize < inputsize_min then
                c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
                assert c IN {Constraint_FORCE, Constraint_FAULT};
                if c == Constraint_FORCE then inputsize = inputsize_min;
            ps = TCR_EL2.PS;
            basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>);
            disabled = FALSE;
            baseregister = TTBR0_EL2;
            descaddr.memattrs = WalkAttrDecode(TCR_EL2.SH0, TCR_EL2.ORGN0, TCR_EL2.IRGN0, secondstage);
            reversedescriptors = SCTLR_EL2.EE == '1';
            lookupsecure = if IsSecureEL2Enabled() then IsSecure() else FALSE;
            singlepriv = TRUE;
            update_AF = HaveAccessFlagUpdateExt() && TCR_EL2.HA == '1';
            update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL2.HD == '1';
            hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL2.HPD == '1';
        else
            if inputaddr<top> == '0' then
                inputsize = 64 - UInt(TCR_EL1.T0SZ);
                largegrain = TCR_EL1.TG0 == '01';
                midgrain = TCR_EL1.TG0 == '10';
                inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
                if !Have52BitVAExt() && inputsize > inputsize_max then
                    c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
                    assert c IN {Constraint_FORCE, Constraint_FAULT};
                    if c == Constraint_FORCE then inputsize = inputsize_max;
                inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
                if inputsize < inputsize_min then
                    c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
                    assert c IN {Constraint_FORCE, Constraint_FAULT};
                    if c == Constraint_FORCE then inputsize = inputsize_min;
                basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>);
                disabled = TCR_EL1.EPD0 == '1' || (!isprivileged && HaveE0PDExt() && TCR_EL1.E0PD0 == '1');
                if el == EL0 && TCR_EL1.NFD0 == '1' then
                    disabled = disabled || acctype == AccType_NONFAULT;
                baseregister = TTBR0_EL1;
                descaddr.memattrs = WalkAttrDecode(TCR_EL1.SH0, TCR_EL1.ORGN0, TCR_EL1.IRGN0, secondstage);
                hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL1.HPD0 == '1';
            else
                inputsize = 64 - UInt(TCR_EL1.T1SZ);
                largegrain = TCR_EL1.TG1 == '11';    // TG1 and TG0 encodings differ
                midgrain = TCR_EL1.TG1 == '01';
                inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
                if !Have52BitVAExt() && inputsize > inputsize_max then
                    c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
                    assert c IN {Constraint_FORCE, Constraint_FAULT};
                    if c == Constraint_FORCE then inputsize = inputsize_max;
                inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
                if inputsize < inputsize_min then
                    c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
                    assert c IN {Constraint_FORCE, Constraint_FAULT};
                    if c == Constraint_FORCE then inputsize = inputsize_min;
                basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsOnes(inputaddr<top:inputsize>);
                disabled = TCR_EL1.EPD1 == '1' || (!isprivileged && HaveE0PDExt() && TCR_EL1.E0PD1 == '1');
                if el == EL0 && TCR_EL1.NFD1 == '1' then
                    disabled = disabled || acctype == AccType_NONFAULT;
                baseregister = TTBR1_EL1;
                descaddr.memattrs = WalkAttrDecode(TCR_EL1.SH1, TCR_EL1.ORGN1, TCR_EL1.IRGN1, secondstage);
                hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL1.HPD1 == '1';
            ps = TCR_EL1.IPS;
            reversedescriptors = SCTLR_EL1.EE == '1';
            lookupsecure = IsSecure();
            singlepriv = FALSE;
            update_AF = HaveAccessFlagUpdateExt() && TCR_EL1.HA == '1';
            update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL1.HD == '1';
        if largegrain then
            grainsize = 16;                                        // Log2(64KB page size)
            firstblocklevel = (if Have52BitPAExt() then 1 else 2); // Largest block is 4TB (2^42 bytes) for 52 bit PA
                                                                   // and 512MB (2^29 bytes) otherwise
        elsif midgrain then
            grainsize = 14;                                        // Log2(16KB page size)
            firstblocklevel = 2;                                   // Largest block is 32MB (2^25 bytes)
        else    // Small grain
            grainsize = 12;                                        // Log2(4KB page size)
            firstblocklevel = 1;                                   // Largest block is 1GB (2^30 bytes)
        stride = grainsize - 3;                                    // Log2(page size / 8 bytes)
        // The starting level is the number of strides needed to consume the input address
        level = 4 - (1 + ((inputsize - grainsize - 1) DIV stride));
    else
        // Second stage translation
        inputaddr = ZeroExtend(ipaddress);
        if IsSecureBelowEL3() then
            // Second stage for Secure translation regime
            if s1_nonsecure then                // Non-secure IPA space
                t0size = VTCR_EL2.T0SZ;
                tg0 = VTCR_EL2.TG0;
                nswalk = VTCR_EL2.NSW;
            else                                // Secure IPA space
                t0size = VSTCR_EL2.T0SZ;
                tg0 = VSTCR_EL2.TG0;
                nswalk = VSTCR_EL2.SW;
            // Stage 2 translation accesses the Non-secure PA space or the Secure PA space
            if nswalk == '1' then
                // When walk is Non-secure, access must be to the Non-secure PA space
                nsaccess = '1';
            elsif !s1_nonsecure then
                // When walk is Secure and in the Secure IPA space,
                // access is specified by VSTCR_EL2.SA
                nsaccess = VSTCR_EL2.SA;
            elsif VSTCR_EL2.SW == '1' || VSTCR_EL2.SA == '1' then
                // When walk is Secure and in the Non-secure IPA space,
                // access is Non-secure when VSTCR_EL2.SA specifies the Non-secure PA space
                nsaccess = '1';
            else
                // When walk is Secure and in the Non-secure IPA space,
                // if VSTCR_EL2.SA specifies the Secure PA space, access is specified by VTCR_EL2.NSA
                nsaccess = VTCR_EL2.NSA;
        else
            // Second stage for Non-secure translation regime
            t0size = VTCR_EL2.T0SZ;
            tg0 = VTCR_EL2.TG0;
            nswalk = '1';
            nsaccess = '1';
        inputsize = 64 - UInt(t0size);
        largegrain = tg0 == '01';
        midgrain = tg0 == '10';
        inputsize_max = if Have52BitPAExt() && PAMax() == 52 && largegrain then 52 else 48;
        if !Have52BitPAExt() && inputsize > inputsize_max then
            c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
            assert c IN {Constraint_FORCE, Constraint_FAULT};
            if c == Constraint_FORCE then inputsize = inputsize_max;
        inputsize_min = 64 - (if !HaveSmallTranslationTableExt() then 39 else if largegrain then 47 else 48);
        if inputsize < inputsize_min then
            c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
            assert c IN {Constraint_FORCE, Constraint_FAULT};
            if c == Constraint_FORCE then inputsize = inputsize_min;
        ps = VTCR_EL2.PS;
        basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<63:inputsize>);
        disabled = FALSE;
        descaddr.memattrs = WalkAttrDecode(VTCR_EL2.SH0, VTCR_EL2.ORGN0, VTCR_EL2.IRGN0, secondstage);
        reversedescriptors = SCTLR_EL2.EE == '1';
        singlepriv = TRUE;
        update_AF = HaveAccessFlagUpdateExt() && VTCR_EL2.HA == '1';
        update_AP = HaveDirtyBitModifierExt() && update_AF && VTCR_EL2.HD == '1';
        if IsSecureEL2Enabled() then
            lookupsecure = !s1_nonsecure;
        else
            lookupsecure = FALSE;
        if lookupsecure then
            baseregister = VSTTBR_EL2;
            startlevel = UInt(VSTCR_EL2.SL0);
        else
            baseregister = VTTBR_EL2;
            startlevel = UInt(VTCR_EL2.SL0);
        if largegrain then
            grainsize = 16;                                        // Log2(64KB page size)
            level = 3 - startlevel;
            firstblocklevel = (if Have52BitPAExt() then 1 else 2); // Largest block is 4TB (2^42 bytes) for 52 bit PA
                                                                   // and 512MB (2^29 bytes) otherwise
        elsif midgrain then
            grainsize = 14;                                        // Log2(16KB page size)
            level = 3 - startlevel;
            firstblocklevel = 2;                                   // Largest block is 32MB (2^25 bytes)
        else    // Small grain
            grainsize = 12;                                        // Log2(4KB page size)
            if HaveSmallTranslationTableExt() && startlevel == 3 then
                level = startlevel;    // Startlevel 3 (VTCR_EL2.SL0 or VSTCR_EL2.SL0 == 0b11) for 4KB granule
            else
                level = 2 - startlevel;
            firstblocklevel = 1;                                   // Largest block is 1GB (2^30 bytes)
        stride = grainsize - 3;                                    // Log2(page size / 8 bytes)
        // Limits on IPA controls based on implemented PA size. Level 0 is only
        // supported by small grain translations
        if largegrain then     // 64KB pages
            // Level 1 only supported if implemented PA size is greater than 2^42 bytes
            if level == 0 || (level == 1 && PAMax() <= 42) then basefound = FALSE;
        elsif midgrain then    // 16KB pages
            // Level 1 only supported if implemented PA size is greater than 2^40 bytes
            if level == 0 || (level == 1 && PAMax() <= 40) then basefound = FALSE;
        else                   // Small grain, 4KB pages
            // Level 0 only supported if implemented PA size is greater than 2^42 bytes
            if level < 0 || (level == 0 && PAMax() <= 42) then basefound = FALSE;

        // If the inputsize exceeds the PAMax value, the behavior is CONSTRAINED UNPREDICTABLE
        inputsizecheck = inputsize;
        if inputsize > PAMax() && (!ELUsingAArch32(EL1) || inputsize > 40) then
            case ConstrainUnpredictable(Unpredictable_LARGEIPA) of
                when Constraint_FORCE
                    // Restrict the inputsize to the PAMax value
                    inputsize = PAMax();
                    inputsizecheck = PAMax();
                when Constraint_FORCENOSLCHECK
                    // As FORCE, except use the configured inputsize in the size checks below
                    inputsize = PAMax();
                when Constraint_FAULT
                    // Generate a translation fault
                    basefound = FALSE;
                otherwise
                    Unreachable();

        // Number of entries in the starting level table =
        //     (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
        startsizecheck = inputsizecheck - ((3 - level)*stride + grainsize);    // Log2(Num of entries)

        // Check for starting level table with fewer than 2 entries or longer than 16 pages.
        // Lower bound check is:  startsizecheck < Log2(2 entries)
        // Upper bound check is:  startsizecheck > Log2(pagesize/8*16)
        if startsizecheck < 1 || startsizecheck > stride + 4 then basefound = FALSE;

    if !basefound || disabled then
        level = 0;    // AArch32 reports this as a level 1 fault
        result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype, iswrite,
                                                         secondstage, s2fs1walk);
        return result;

    case ps of
        when '000'  outputsize = 32;
        when '001'  outputsize = 36;
        when '010'  outputsize = 40;
        when '011'  outputsize = 42;
        when '100'  outputsize = 44;
        when '101'  outputsize = 48;
        when '110'  outputsize = (if Have52BitPAExt() && largegrain then 52 else 48);
        otherwise   outputsize = integer IMPLEMENTATION_DEFINED "Reserved Intermediate Physical Address size value";

    if outputsize > PAMax() then outputsize = PAMax();

    if outputsize < 48 && !IsZero(baseregister<47:outputsize>) then
        level = 0;
        result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress, s1_nonsecure, level, acctype, iswrite,
                                                         secondstage, s2fs1walk);
        return result;

    // Bottom bound of the Base address is:
    //     Log2(8 bytes per entry)+Log2(Number of entries in starting level table)
    // Number of entries in starting level table =
    //     (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
    baselowerbound = 3 + inputsize - ((3-level)*stride + grainsize);    // Log2(Num of entries*8)
    if outputsize == 52 then
        z = (if baselowerbound < 6 then 6 else baselowerbound);
        baseaddress = baseregister<5:2>:baseregister<47:z>:Zeros(z);
    else
        baseaddress = ZeroExtend(baseregister<47:baselowerbound>:Zeros(baselowerbound));

    ns_table = if lookupsecure then '0' else '1';
    ap_table = '00';
    xn_table = '0';
    pxn_table = '0';

    addrselecttop = inputsize - 1;

    apply_nvnv1_effect = HaveNVExt() && EL2Enabled() && HCR_EL2.<NV,NV1> == '11' && S1TranslationRegime() == EL1 && !secondstage;

    repeat
        addrselectbottom = (3-level)*stride + grainsize;

        bits(52) index = ZeroExtend(inputaddr<addrselecttop:addrselectbottom>:'000');
        descaddr.paddress.address = baseaddress OR index;
        descaddr.paddress.NS = if secondstage then nswalk else ns_table;

        // If there are two stages of translation, then the first stage table walk addresses
        // are themselves subject to translation
        if secondstage || !HasS2Translation() || (HaveNV2Ext() && acctype == AccType_NV2REGISTER) then
            descaddr2 = descaddr;
        else
            hwupdatewalk = FALSE;
            descaddr2 = AArch64.SecondStageWalk(descaddr, vaddress, acctype, iswrite, 8, hwupdatewalk);
            // Check for a fault on the stage 2 walk
            if IsFault(descaddr2) then
                result.addrdesc.fault = descaddr2.fault;
                return result;

        // Update virtual address for abort functions
        descaddr2.vaddress = ZeroExtend(vaddress);

        accdesc = CreateAccessDescriptorTTW(acctype, secondstage, s2fs1walk, level);
        desc = _Mem[descaddr2, 8, accdesc, iswrite];

        if reversedescriptors then desc = BigEndianReverse(desc);

        if desc<0> == '0' || (desc<1:0> == '01' && (level == 3 ||
              (HaveBlockBBM() && IsBlockDescriptorNTBitValid() && desc<16> == '1'))) then
            // Fault (00), Reserved (10), Block (01) at level 3, or Block(01) with nT bit set.
            result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype,
                                                             iswrite, secondstage, s2fs1walk);
            return result;

        // Valid Block, Page, or Table entry
        if desc<1:0> == '01' || level == 3 then    // Block (01) or Page (11)
            blocktranslate = TRUE;
        else                                       // Table (11)
            if (outputsize < 52 && largegrain && (PAMax() == 52 ||
                  boolean IMPLEMENTATION_DEFINED "Address Size Fault on LPA descriptor bits [15:12]") &&
                  !IsZero(desc<15:12>)) || (outputsize < 48 && !IsZero(desc<47:outputsize>)) then
                result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress, s1_nonsecure, level, acctype,
                                                                 iswrite, secondstage, s2fs1walk);
                return result;

            if outputsize == 52 then
                baseaddress = desc<15:12>:desc<47:grainsize>:Zeros(grainsize);
            else
                baseaddress = ZeroExtend(desc<47:grainsize>:Zeros(grainsize));
            if !secondstage then
                // Unpack the upper and lower table attributes
                ns_table = ns_table OR desc<63>;
            if !secondstage && !hierattrsdisabled then
                ap_table<1> = ap_table<1> OR desc<62>;    // read-only
                if apply_nvnv1_effect then
                    pxn_table = pxn_table OR desc<60>;
                else
                    xn_table = xn_table OR desc<60>;
                // pxn_table and ap_table[0] apply in EL1&0 or EL2&0 translation regimes
                if !singlepriv then
                    if !apply_nvnv1_effect then
                        pxn_table = pxn_table OR desc<59>;
                    ap_table<0> = ap_table<0> OR desc<61>;    // privileged

            level = level + 1;
            addrselecttop = addrselectbottom - 1;
            blocktranslate = FALSE;
    until blocktranslate;

    // Check block size is supported at this level
    if level < firstblocklevel then
        result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype,
                                                         iswrite, secondstage, s2fs1walk);
        return result;

    // Check for misprogramming of the contiguous bit
    if largegrain then
        num_ch_entries = 5;
    elsif midgrain then
        num_ch_entries = if level == 3 then 7 else 5;
    else
        num_ch_entries = 4;

    contiguousbitcheck = inputsize < (addrselectbottom + num_ch_entries);

    if contiguousbitcheck && desc<52> == '1' then
        if boolean IMPLEMENTATION_DEFINED "Translation fault on misprogrammed contiguous bit" then
            result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype,
                                                             iswrite, secondstage, s2fs1walk);
            return result;

    // Unpack the descriptor into address and upper and lower block attributes
    if largegrain then
        outputaddress = desc<15:12>:desc<47:addrselectbottom>:inputaddr<addrselectbottom-1:0>;
    else
        outputaddress = ZeroExtend(desc<47:addrselectbottom>:inputaddr<addrselectbottom-1:0>);

    // When 52-bit PA is supported, for 64 Kbyte translation granule,
    // block size might be larger than the supported output address size
    if ((outputsize < 52 && !IsZero(outputaddress<51:48>) && largegrain && (PAMax() == 52 ||
          boolean IMPLEMENTATION_DEFINED "Address Size Fault on LPA descriptor bits [15:12]")) ||
          (outputsize < 48 && !IsZero(outputaddress<47:outputsize>))) then
        result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress, s1_nonsecure, level, acctype,
                                                         iswrite, secondstage, s2fs1walk);
        return result;

    // Check Access Flag
    if desc<10> == '0' then
        if !update_AF then
            result.addrdesc.fault = AArch64.AccessFlagFault(ipaddress, s1_nonsecure, level, acctype,
                                                            iswrite, secondstage, s2fs1walk);
            return result;
        else
            result.descupdate.AF = TRUE;

    if update_AP && desc<51> == '1' then
        // If hw update of access permission field is configured consider AP[2] as '0' / S2AP[2] as '1'
        if !secondstage && desc<7> == '1' then
            desc<7> = '0';
            result.descupdate.AP = TRUE;
        elsif secondstage && desc<7> == '0' then
            desc<7> = '1';
            result.descupdate.AP = TRUE;

    // Required descriptor if AF or AP[2]/S2AP[2] needs update
    result.descupdate.descaddr = descaddr;

    if apply_nvnv1_effect then
        pxn = desc<54>;        // Bit[54] of the block/page descriptor holds PXN instead of UXN
        xn = '0';              // XN is '0'
        ap = desc<7>:'01';     // Bit[6] of the block/page descriptor is treated as '0' regardless of value programmed
    else
        xn = desc<54>;         // Bit[54] of the block/page descriptor holds UXN
        pxn = desc<53>;        // Bit[53] of the block/page descriptor holds PXN
        ap = desc<7:6>:'1';    // Bits[7:6] of the block/page descriptor hold AP[2:1]
    contiguousbit = desc<52>;
    nG = desc<11>;
    sh = desc<9:8>;
    memattr = desc<5:2>;       // AttrIndx and NS bit in stage 1

    result.domain = bits(4) UNKNOWN;    // Domains not used
    result.level = level;
    result.blocksize = 2^((3-level)*stride + grainsize);

    // Stage 1 translation regimes also inherit attributes from the tables
    if !secondstage then
        result.perms.xn = xn OR xn_table;
        result.perms.ap<2> = ap<2> OR ap_table<1>;    // Force read-only
        // PXN, nG and AP[1] apply in EL1&0 or EL2&0 stage 1 translation regimes
        if !singlepriv then
            result.perms.ap<1> = ap<1> AND NOT(ap_table<0>);    // Force privileged only
            result.perms.pxn = pxn OR pxn_table;
            // Pages from Non-secure tables are marked non-global in Secure EL1&0
            if IsSecure() then
                result.nG = nG OR ns_table;
            else
                result.nG = nG;
        else
            result.perms.ap<1> = '1';
            result.perms.pxn = '0';
            result.nG = '0';
        result.GP = desc<50>;    // Stage 1 block or pages might be guarded
        result.perms.ap<0> = '1';
        result.addrdesc.memattrs = AArch64.S1AttrDecode(sh, memattr<2:0>, acctype);
        result.addrdesc.paddress.NS = memattr<3> OR ns_table;
    else
        result.perms.ap<2:1> = ap<2:1>;
        result.perms.ap<0> = '1';
        result.perms.xn = xn;
        if HaveExtendedExecuteNeverExt() then result.perms.xxn = desc<53>;
        result.perms.pxn = '0';
        result.nG = '0';
        if s2fs1walk then
            result.addrdesc.memattrs = S2AttrDecode(sh, memattr, AccType_TTW);
        else
            result.addrdesc.memattrs = S2AttrDecode(sh, memattr, acctype);
        result.addrdesc.paddress.NS = nsaccess;

    result.addrdesc.paddress.address = outputaddress;
    result.addrdesc.fault = AArch64.NoFault();
    result.contiguous = contiguousbit == '1';
    if HaveCommonNotPrivateTransExt() then result.CnP = baseregister<0>;

    return result;
// ClearStickyErrors()
// ===================
// Clear the cumulative DCC/ITR sticky error flags in EDSCR, as performed by
// a write to EDRCR.CSE from the external debugger interface.
ClearStickyErrors()
    EDSCR.TXU = '0';            // Clear TX underrun flag
    EDSCR.RXO = '0';            // Clear RX overrun flag
    if Halted() then            // in Debug state
        EDSCR.ITO = '0';        // Clear ITR overrun flag
    // If halted and the ITR is not empty then it is UNPREDICTABLE whether the EDSCR.ERR is cleared.
    // The UNPREDICTABLE behavior also affects the instructions in flight, but this is not described
    // in the pseudocode.
    if Halted() && EDSCR.ITE == '0' && ConstrainUnpredictableBool(Unpredictable_CLEARERRITEZERO) then
        return;
    EDSCR.ERR = '0';            // Clear cumulative error flag
    return;

// DebugTarget()
// =============
// Returns the debug exception target Exception level
bits(2) DebugTarget()
    secure = IsSecure();
    return DebugTargetFrom(secure);
// DebugTargetFrom()
// =================
// Returns the debug exception target Exception level for the given Security state:
// EL2 when debug exceptions are routed to EL2 (MDCR_EL2.TDE/HDCR.TDE or HCR_EL2.TGE/HCR.TGE),
// EL3 when EL3 is using AArch32 and the state is Secure, otherwise EL1.
bits(2) DebugTargetFrom(boolean secure)
    // Routing to EL2 is only possible when EL2 is enabled for the given Security
    // state (Secure EL2 requires FEAT_SEL2 and, if EL3 exists, SCR_EL3.EEL2 == 1).
    if HaveEL(EL2) && (!secure || (HaveSecureEL2Ext() &&
                                   (!HaveEL(EL3) || SCR_EL3.EEL2 == '1'))) then
        if ELUsingAArch32(EL2) then
            route_to_el2 = (HDCR.TDE == '1' || HCR.TGE == '1');
        else
            route_to_el2 = (MDCR_EL2.TDE == '1' || HCR_EL2.TGE == '1');
    else
        route_to_el2 = FALSE;

    if route_to_el2 then
        target = EL2;
    elsif HaveEL(EL3) && HighestELUsingAArch32() && secure then
        target = EL3;
    else
        target = EL1;
    return target;
// DoubleLockStatus()
// ==================
// Returns the state of the OS Double Lock.
// FALSE if OSDLR_EL1.DLK == 0 or DBGPRCR_EL1.CORENPDRQ == 1 or the PE is in Debug state.
// TRUE if OSDLR_EL1.DLK == 1 and DBGPRCR_EL1.CORENPDRQ == 0 and the PE is in Non-debug state.
boolean DoubleLockStatus()
    if !HaveDoubleLock() then
        return FALSE;
    elsif ELUsingAArch32(EL1) then
        // AArch32 register views of the OS Double Lock controls
        return DBGOSDLR.DLK == '1' && DBGPRCR.CORENPDRQ == '0' && !Halted();
    else
        return OSDLR_EL1.DLK == '1' && DBGPRCR_EL1.CORENPDRQ == '0' && !Halted();
// AllowExternalDebugAccess()
// ==========================
// Returns TRUE if an external debug interface access to the External debug registers
// is allowed, FALSE otherwise.
boolean AllowExternalDebugAccess()
    // The access may also be subject to OS Lock, power-down, etc.
    if HaveSecureExtDebugView() then
        // FEAT_Debugv8p4: gate on the Security state of the access itself
        return AllowExternalDebugAccess(IsAccessSecure());
    else
        return AllowExternalDebugAccess(ExternalSecureInvasiveDebugEnabled());

// AllowExternalDebugAccess()
// ==========================
// Returns TRUE if an external debug interface access to the External debug registers
// is allowed for the given Security state, FALSE otherwise.
boolean AllowExternalDebugAccess(boolean allow_secure)
    // The access may also be subject to OS Lock, power-down, etc.
    if HaveSecureExtDebugView() || ExternalInvasiveDebugEnabled() then
        if allow_secure then
            return TRUE;
        elsif HaveEL(EL3) then
            // EL3 controls Non-secure external debug access via SDCR/MDCR_EL3.EDAD
            if ELUsingAArch32(EL3) then
                return SDCR.EDAD == '0';
            else
                return MDCR_EL3.EDAD == '0';
        else
            return !IsSecure();
    else
        return FALSE;
// AllowExternalPMUAccess()
// ========================
// Returns TRUE if an external debug interface access to the PMU registers is allowed, FALSE otherwise.
boolean AllowExternalPMUAccess()
    // The access may also be subject to OS Lock, power-down, etc.
    if HaveSecureExtDebugView() then
        // FEAT_Debugv8p4: gate on the Security state of the access itself
        return AllowExternalPMUAccess(IsAccessSecure());
    else
        return AllowExternalPMUAccess(ExternalSecureNoninvasiveDebugEnabled());

// AllowExternalPMUAccess()
// ========================
// Returns TRUE if an external debug interface access to the PMU registers is allowed for the given
// Security state, FALSE otherwise.
boolean AllowExternalPMUAccess(boolean allow_secure)
    // The access may also be subject to OS Lock, power-down, etc.
    if HaveSecureExtDebugView() || ExternalNoninvasiveDebugEnabled() then
        if allow_secure then
            return TRUE;
        elsif HaveEL(EL3) then
            // EL3 controls Non-secure external PMU access via SDCR/MDCR_EL3.EPMAD
            if ELUsingAArch32(EL3) then
                return SDCR.EPMAD == '0';
            else
                return MDCR_EL3.EPMAD == '0';
        else
            return !IsSecure();
    else
        return FALSE;
// AllowExternalPMUAccess()
// ========================
// Returns TRUE if an external debug interface access to the PMU registers is allowed, FALSE otherwise.
boolean AllowExternalPMUAccess()
    // The access may also be subject to OS Lock, power-down, etc.
    if HaveSecureExtDebugView() then
        return AllowExternalPMUAccess(IsAccessSecure());
    else
        return AllowExternalPMUAccess(ExternalSecureNoninvasiveDebugEnabled());

// AllowExternalPMUAccess()
// ========================
// Returns TRUE if an external debug interface access to the PMU registers is allowed for the given
// Security state, FALSE otherwise.
boolean AllowExternalPMUAccess(boolean allow_secure)
    // The access may also be subject to OS Lock, power-down, etc.
    if HaveSecureExtDebugView() || ExternalNoninvasiveDebugEnabled() then
        if allow_secure then
            return TRUE;
        elsif HaveEL(EL3) then
            if ELUsingAArch32(EL3) then
                return SDCR.EPMAD == '0';
            else
                return MDCR_EL3.EPMAD == '0';
        else
            return !IsSecure();
    else
        return FALSE;

// External debug authentication interface signals (CoreSight recommended interface).
signal DBGEN;
signal NIDEN;
signal SPIDEN;
signal SPNIDEN;
// ExternalInvasiveDebugEnabled()
// ==============================
// The definition of this function is IMPLEMENTATION DEFINED.
// In the recommended interface, this function returns the state of the DBGEN signal.
boolean ExternalInvasiveDebugEnabled()
    return DBGEN == HIGH;

// ExternalNoninvasiveDebugAllowed()
// =================================
// Returns TRUE if Trace and PC Sample-based Profiling are allowed
boolean ExternalNoninvasiveDebugAllowed()
    return (ExternalNoninvasiveDebugEnabled() &&
            (!IsSecure() || ExternalSecureNoninvasiveDebugEnabled() ||
             (ELUsingAArch32(EL1) && PSTATE.EL == EL0 && SDER.SUNIDEN == '1')));
// ExternalNoninvasiveDebugEnabled()
// =================================
// This function returns TRUE if the FEAT_Debugv8p4 is implemented, otherwise this
// function is IMPLEMENTATION DEFINED.
// In the recommended interface, ExternalNoninvasiveDebugEnabled returns the state of the (DBGEN
// OR NIDEN) signal.
boolean ExternalNoninvasiveDebugEnabled()
    return !HaveNoninvasiveDebugAuth() || ExternalInvasiveDebugEnabled() || NIDEN == HIGH;
// ExternalSecureInvasiveDebugEnabled()
// ====================================
// The definition of this function is IMPLEMENTATION DEFINED.
// In the recommended interface, this function returns the state of the (DBGEN AND SPIDEN) signal.
// CoreSight allows asserting SPIDEN without also asserting DBGEN, but this is not recommended.
boolean ExternalSecureInvasiveDebugEnabled()
    // Without EL3 and outside Secure state there is no Secure debug to enable.
    if !HaveEL(EL3) && !IsSecure() then return FALSE;
    return ExternalInvasiveDebugEnabled() && SPIDEN == HIGH;
// ExternalSecureNoninvasiveDebugEnabled()
// =======================================
// This function returns the value of ExternalSecureInvasiveDebugEnabled() when FEAT_Debugv8p4
// is implemented. Otherwise, the definition of this function is IMPLEMENTATION DEFINED.
// In the recommended interface, this function returns the state of the (DBGEN OR NIDEN) AND
// (SPIDEN OR SPNIDEN) signal.
boolean ExternalSecureNoninvasiveDebugEnabled()
    // Without EL3 and outside Secure state there is no Secure debug to enable.
    if !HaveEL(EL3) && !IsSecure() then return FALSE;
    if HaveNoninvasiveDebugAuth() then
        return ExternalNoninvasiveDebugEnabled() && (SPIDEN == HIGH || SPNIDEN == HIGH);
    else
        return ExternalSecureInvasiveDebugEnabled();
// Returns TRUE when an access is Secure
boolean IsAccessSecure();

// Returns TRUE if the Core power domain is powered on, FALSE otherwise.
boolean IsCorePowered();
// CheckValidStateMatch()
// ======================
// Checks for an invalid state match that will generate Constrained Unpredictable behaviour, otherwise
// returns Constraint_NONE.
(Constraint, bits(2), bit, bits(2)) CheckValidStateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean isbreakpnt)
    boolean reserved = FALSE;

    // Match 'Usr/Sys/Svc' only valid for AArch32 breakpoints
    if (!isbreakpnt || !HaveAArch32EL(EL1)) && HMC:PxC == '000' && SSC != '11' then
        reserved = TRUE;

    // Both EL3 and EL2 are not implemented
    if !HaveEL(EL3) && !HaveEL(EL2) && (HMC != '0' || SSC != '00') then
        reserved = TRUE;

    // EL3 is not implemented
    if !HaveEL(EL3) && SSC IN {'01','10'} && HMC:SSC:PxC != '10100' then
        reserved = TRUE;

    // EL3 using AArch64 only
    if (!HaveEL(EL3) || HighestELUsingAArch32()) && HMC:SSC:PxC == '11000' then
        reserved = TRUE;

    // EL2 is not implemented
    if !HaveEL(EL2) && HMC:SSC:PxC == '11100' then
        reserved = TRUE;

    // Secure EL2 is not implemented
    if !HaveSecureEL2Ext() && (HMC:SSC:PxC) IN {'01100','10100','x11x1'} then
        reserved = TRUE;

    // Values that are not allocated in any architecture version
    if (HMC:SSC:PxC) IN {'01110','100x0','10110','11x10'} then
        reserved = TRUE;

    if reserved then
        // If parameters are set to a reserved type, behaves as either disabled or a defined type
        (c, <HMC,SSC,PxC>) = ConstrainUnpredictableBits(Unpredictable_RESBPWPCTRL);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then
            return (c, bits(2) UNKNOWN, bit UNKNOWN, bits(2) UNKNOWN);
    // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value

    return (Constraint_NONE, SSC, HMC, PxC);
// Set a Cross Trigger multi-cycle input event trigger to the specified level.
CTI_SetEventLevel(CrossTriggerIn id, signal level);
// GetNumBreakpoints()
// ===================
// Returns the number of breakpoints implemented. This is indicated to software by
// DBGDIDR.BRPs in AArch32 state, and ID_AA64DFR0_EL1.BRPs in AArch64 state.
integer GetNumBreakpoints()
    return integer IMPLEMENTATION_DEFINED "Number of breakpoints";

// Signal a discrete event on a Cross Trigger input event trigger.
CTI_SignalEvent(CrossTriggerIn id);
// GetNumContextAwareBreakpoints()
// ===============================
// Returns the number of context-aware breakpoints implemented. This is indicated to software by
// DBGDIDR.CTX_CMPs in AArch32 state, and ID_AA64DFR0_EL1.CTX_CMPs in AArch64 state.
integer GetNumContextAwareBreakpoints()
    return integer IMPLEMENTATION_DEFINED "Number of context-aware breakpoints";

enumeration CrossTriggerOut {CrossTriggerOut_DebugRequest, CrossTriggerOut_RestartRequest,
                             CrossTriggerOut_IRQ,          CrossTriggerOut_RSVD3,
                             CrossTriggerOut_TraceExtIn0,  CrossTriggerOut_TraceExtIn1,
                             CrossTriggerOut_TraceExtIn2,  CrossTriggerOut_TraceExtIn3};

enumeration CrossTriggerIn  {CrossTriggerIn_CrossHalt,     CrossTriggerIn_PMUOverflow,
                             CrossTriggerIn_RSVD2,         CrossTriggerIn_RSVD3,
                             CrossTriggerIn_TraceExtOut0,  CrossTriggerIn_TraceExtOut1,
                             CrossTriggerIn_TraceExtOut2,  CrossTriggerIn_TraceExtOut3};
// GetNumWatchpoints()
// ===================
// Returns the number of watchpoints implemented. This is indicated to software by
// DBGDIDR.WRPs in AArch32 state, and ID_AA64DFR0_EL1.WRPs in AArch64 state.
integer GetNumWatchpoints()
    return integer IMPLEMENTATION_DEFINED "Number of watchpoints";

// CheckForDCCInterrupts()
// =======================
// Recompute the Debug Communications Channel interrupt request levels from the
// DCC flow-control flags and the interrupt enables in DBGDCCINT/MDCCINT_EL1.
CheckForDCCInterrupts()
    commrx = (EDSCR.RXfull == '1');
    commtx = (EDSCR.TXfull == '0');

    // COMMRX and COMMTX support is optional and not recommended for new designs.
    // SetInterruptRequestLevel(InterruptID_COMMRX, if commrx then HIGH else LOW);
    // SetInterruptRequestLevel(InterruptID_COMMTX, if commtx then HIGH else LOW);

    // The value to be driven onto the common COMMIRQ signal.
    if ELUsingAArch32(EL1) then
        commirq = ((commrx && DBGDCCINT.RX == '1') ||
                   (commtx && DBGDCCINT.TX == '1'));
    else
        commirq = ((commrx && MDCCINT_EL1.RX == '1') ||
                   (commtx && MDCCINT_EL1.TX == '1'));
    SetInterruptRequestLevel(InterruptID_COMMIRQ, if commirq then HIGH else LOW);

    return;
// DBGDTRRX_EL0[] (external write)
// ===============================
// Called on writes to debug register 0x08C.
DBGDTRRX_EL0[boolean memory_mapped] = bits(32) value
    if EDPRSR<6:5,0> != '001' then                     // Check DLK, OSLK and PU bits
        IMPLEMENTATION_DEFINED "generate error response";
        return;
    if EDSCR.ERR == '1' then return;                   // Error flag set: ignore write

    // The Software lock is OPTIONAL.
    if memory_mapped && EDLSR.SLK == '1' then return;  // Software lock locked: ignore write

    if EDSCR.RXfull == '1' || (Halted() && EDSCR.MA == '1' && EDSCR.ITE == '0') then
        EDSCR.RXO = '1'; EDSCR.ERR = '1';              // Overrun condition: ignore write
        return;
    EDSCR.RXfull = '1';
    DTRRX = value;

    if Halted() && EDSCR.MA == '1' then
        EDSCR.ITE = '0';                               // See comments in EDITR[] (external write)
        if !UsingAArch32() then
            ExecuteA64(0xD5330501<31:0>);              // A64 "MRS X1,DBGDTRRX_EL0"
            ExecuteA64(0xB8004401<31:0>);              // A64 "STR W1,[X0],#4"
            X[1] = bits(64) UNKNOWN;
        else
            ExecuteT32(0xEE10<15:0> /*hw1*/, 0x1E15<15:0> /*hw2*/); // T32 "MRS R1,DBGDTRRXint"
            ExecuteT32(0xF840<15:0> /*hw1*/, 0x1B04<15:0> /*hw2*/); // T32 "STR R1,[R0],#4"
            R[1] = bits(32) UNKNOWN;
        // If the store aborts, the Data Abort exception is taken and EDSCR.ERR is set to 1
        if EDSCR.ERR == '1' then
            EDSCR.RXfull = bit UNKNOWN;
            DBGDTRRX_EL0 = bits(64) UNKNOWN;
        else
            // "MRS X1,DBGDTRRX_EL0" calls DBGDTR_EL0[] (read) which clears RXfull.
            assert EDSCR.RXfull == '0';
        EDSCR.ITE = '1';                               // See comments in EDITR[] (external write)
    return;
// DBGDTRRX_EL0[] (external read)
// ==============================
bits(32) DBGDTRRX_EL0[boolean memory_mapped]
    return DTRRX;
// DBGDTRTX_EL0[] (external read)
// ==============================
// Called on reads of debug register 0x080.
bits(32) DBGDTRTX_EL0[boolean memory_mapped]
    if EDPRSR<6:5,0> != '001' then                     // Check DLK, OSLK and PU bits
        IMPLEMENTATION_DEFINED "generate error response";
        return bits(32) UNKNOWN;

    underrun = EDSCR.TXfull == '0' || (Halted() && EDSCR.MA == '1' && EDSCR.ITE == '0');
    value = if underrun then bits(32) UNKNOWN else DTRTX;

    if EDSCR.ERR == '1' then return value;             // Error flag set: no side-effects

    // The Software lock is OPTIONAL.
    if memory_mapped && EDLSR.SLK == '1' then          // Software lock locked: no side-effects
        return value;

    if underrun then
        EDSCR.TXU = '1'; EDSCR.ERR = '1';              // Underrun condition: block side-effects
        return value;                                  // Return UNKNOWN

    EDSCR.TXfull = '0';
    if Halted() && EDSCR.MA == '1' then
        EDSCR.ITE = '0';                               // See comments in EDITR[] (external write)

        if !UsingAArch32() then
            ExecuteA64(0xB8404401<31:0>);              // A64 "LDR W1,[X0],#4"
        else
            ExecuteT32(0xF850<15:0> /*hw1*/, 0x1B04<15:0> /*hw2*/); // T32 "LDR R1,[R0],#4"
        // If the load aborts, the Data Abort exception is taken and EDSCR.ERR is set to 1
        if EDSCR.ERR == '1' then
            EDSCR.TXfull = bit UNKNOWN;
            DBGDTRTX_EL0 = bits(64) UNKNOWN;
        else
            if !UsingAArch32() then
                ExecuteA64(0xD5130501<31:0>);          // A64 "MSR DBGDTRTX_EL0,X1"
            else
                ExecuteT32(0xEE00<15:0> /*hw1*/, 0x1E15<15:0> /*hw2*/); // T32 "MSR DBGDTRTXint,R1"
            // "MSR DBGDTRTX_EL0,X1" calls DBGDTR_EL0[] (write) which sets TXfull.
            assert EDSCR.TXfull == '1';
        if !UsingAArch32() then
            X[1] = bits(64) UNKNOWN;
        else
            R[1] = bits(32) UNKNOWN;
        EDSCR.ITE = '1';                               // See comments in EDITR[] (external write)

    return value;
// DBGDTRTX_EL0[] (external write)
// ===============================
DBGDTRTX_EL0[boolean memory_mapped] = bits(32) value
    // The Software lock is OPTIONAL.
    if memory_mapped && EDLSR.SLK == '1' then return;  // Software lock locked: ignore write
    DTRTX = value;
    return;
// DBGDTR_EL0[] (write)
// ====================
// System register writes to DBGDTR_EL0, DBGDTRTX_EL0 (AArch64) and DBGDTRTXint (AArch32)
DBGDTR_EL0[] = bits(N) value
    // For MSR DBGDTRTX_EL0,<Rt>  N=32, value=X[t]<31:0>, X[t]<63:32> is ignored
    // For MSR DBGDTR_EL0,<Xt>    N=64, value=X[t]<63:0>
    assert N IN {32,64};
    if EDSCR.TXfull == '1' then
        value = bits(N) UNKNOWN;
    // On a 64-bit write, implement a half-duplex channel
    if N == 64 then DTRRX = value<63:32>;
    DTRTX = value<31:0>;        // 32-bit or 64-bit write
    EDSCR.TXfull = '1';
    return;
// DBGDTR_EL0[] (read)
// ===================
// System register reads of DBGDTR_EL0, DBGDTRRX_EL0 (AArch64) and DBGDTRRXint (AArch32)
bits(N) DBGDTR_EL0[]
    // For MRS <Rt>,DBGDTRTX_EL0  N=32, X[t]=Zeros(32):result
    // For MRS <Xt>,DBGDTR_EL0    N=64, X[t]=result
    assert N IN {32,64};
    bits(N) result;
    if EDSCR.RXfull == '0' then
        result = bits(N) UNKNOWN;
    else
        // On a 64-bit read, implement a half-duplex channel
        // NOTE: the word order is reversed on reads with regards to writes
        if N == 64 then result<63:32> = DTRTX;
        result<31:0> = DTRRX;
    EDSCR.RXfull = '0';
    return result;
// DCC transfer-register state shared between the internal (system register)
// and external (debugger) views of the Debug Communications Channel.
bits(32) DTRRX;
bits(32) DTRTX;
// EDITR[] (external write)
// ========================
// Called on writes to debug register 0x084.
EDITR[boolean memory_mapped] = bits(32) value
    if EDPRSR<6:5,0> != '001' then                     // Check DLK, OSLK and PU bits
        IMPLEMENTATION_DEFINED "generate error response";
        return;

    if EDSCR.ERR == '1' then return;                   // Error flag set: ignore write

    // The Software lock is OPTIONAL.
    if memory_mapped && EDLSR.SLK == '1' then return;  // Software lock locked: ignore write

    if !Halted() then return;                          // Non-debug state: ignore write

    if EDSCR.ITE == '0' || EDSCR.MA == '1' then
        EDSCR.ITO = '1'; EDSCR.ERR = '1';              // Overrun condition: block write
        return;

    // ITE indicates whether the processor is ready to accept another instruction; the processor
    // may support multiple outstanding instructions. Unlike the "InstrCompl" flag in [v7A] there
    // is no indication that the pipeline is empty (all instructions have completed). In this
    // pseudocode, the assumption is that only one instruction can be executed at a time,
    // meaning ITE acts like "InstrCompl".
    EDSCR.ITE = '0';

    if !UsingAArch32() then
        ExecuteA64(value);
    else
        ExecuteT32(value<15:0>/*hw1*/, value<31:16> /*hw2*/);

    EDSCR.ITE = '1';

    return;
// DCPSInstruction()
// =================
// Operation of the DCPS instruction in Debug state
DCPSInstruction(bits(2) target_el)

    SynchronizeContext();

    // Resolve the Exception level the instruction actually moves to, taking
    // UNDEFINED cases (missing EL, TGE routing, Secure EL2 disabled, SDD) into account.
    case target_el of
        when EL1
            if PSTATE.EL == EL2 || (PSTATE.EL == EL3 && !UsingAArch32()) then handle_el = PSTATE.EL;
            elsif EL2Enabled() && HCR_EL2.TGE == '1' then UNDEFINED;
            else handle_el = EL1;
        when EL2
            if !HaveEL(EL2) then UNDEFINED;
            elsif PSTATE.EL == EL3 && !UsingAArch32() then handle_el = EL3;
            elsif !IsSecureEL2Enabled() && IsSecure() then UNDEFINED;
            else handle_el = EL2;
        when EL3
            if EDSCR.SDD == '1' || !HaveEL(EL3) then UNDEFINED;
            handle_el = EL3;
        otherwise
            Unreachable();

    from_secure = IsSecure();
    if ELUsingAArch32(handle_el) then
        if PSTATE.M == M32_Monitor then SCR.NS = '0';
        assert UsingAArch32();                      // Cannot move from AArch64 to AArch32
        case handle_el of
            when EL1
                AArch32.WriteMode(M32_Svc);
                if HavePANExt() && SCTLR.SPAN == '0' then
                    PSTATE.PAN = '1';
            when EL2  AArch32.WriteMode(M32_Hyp);
            when EL3
                AArch32.WriteMode(M32_Monitor);
                if HavePANExt() then
                    if !from_secure then
                        PSTATE.PAN = '0';
                    elsif SCTLR.SPAN == '0' then
                        PSTATE.PAN = '1';
        if handle_el == EL2 then
            ELR_hyp = bits(32) UNKNOWN;  HSR = bits(32) UNKNOWN;
        else
            LR = bits(32) UNKNOWN;
        SPSR[] = bits(32) UNKNOWN;
        PSTATE.E = SCTLR[].EE;
        DLR = bits(32) UNKNOWN;  DSPSR = bits(32) UNKNOWN;

    else                                            // Targeting AArch64
        if UsingAArch32() then
            AArch64.MaybeZeroRegisterUppers();
        MaybeZeroSVEUppers(target_el);
        PSTATE.nRW = '0';  PSTATE.SP = '1';  PSTATE.EL = handle_el;
        if HavePANExt() && ((handle_el == EL1 && SCTLR_EL1.SPAN == '0') ||
                            (handle_el == EL2 && HCR_EL2.E2H == '1' &&
                             HCR_EL2.TGE == '1' && SCTLR_EL2.SPAN == '0')) then
            PSTATE.PAN = '1';
        ELR[] = bits(64) UNKNOWN;  SPSR[] = bits(64) UNKNOWN;  ESR[] = bits(64) UNKNOWN;
        DLR_EL0 = bits(64) UNKNOWN;  DSPSR_EL0 = bits(64) UNKNOWN;
        if HaveUAOExt() then PSTATE.UAO = '0';

    if HaveMTEExt() then PSTATE.TCO = '1';

    UpdateEDSCRFields();                            // Update EDSCR PE state flags

    sync_errors = HaveIESB() && SCTLR[].IESB == '1';
    if HaveDoubleFaultExt() && !UsingAArch32() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
    // SCTLR[].IESB might be ignored in Debug state.
    if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then
        sync_errors = FALSE;
    if sync_errors then
        SynchronizeErrors();

    return;
// DRPSInstruction()
// =================
// Operation of the A64 DRPS and T32 ERET instructions in Debug state
DRPSInstruction()

    SynchronizeContext();

    sync_errors = HaveIESB() && SCTLR[].IESB == '1';
    if HaveDoubleFaultExt() && !UsingAArch32() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
    // SCTLR[].IESB might be ignored in Debug state.
    if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then
        sync_errors = FALSE;
    if sync_errors then
        SynchronizeErrors();

    bits(64) spsr = SPSR[];
    SetPSTATEFromPSR(spsr);

    // PSTATE.{N,Z,C,V,Q,GE,SS,D,A,I,F} are not observable and ignored in Debug state, so
    // behave as if UNKNOWN.
    if UsingAArch32() then
        PSTATE.<N,Z,C,V,Q,GE,SS,A,I,F> = bits(13) UNKNOWN;
        // In AArch32, all instructions are T32 and unconditional.
        PSTATE.IT = '00000000';  PSTATE.T = '1';    // PSTATE.J is RES0
        DLR = bits(32) UNKNOWN;  DSPSR = bits(32) UNKNOWN;
    else
        PSTATE.<N,Z,C,V,SS,D,A,I,F> = bits(9) UNKNOWN;
        DLR_EL0 = bits(64) UNKNOWN;  DSPSR_EL0 = bits(64) UNKNOWN;

    UpdateEDSCRFields();                            // Update EDSCR PE state flags

    return;
bits(32) DTRRX;
bits(32) DTRTX;

// Debug state entry reason codes, as reported in EDSCR.STATUS.
constant bits(6) DebugHalt_Breakpoint      = '000111';
constant bits(6) DebugHalt_EDBGRQ          = '010011';
constant bits(6) DebugHalt_Step_Normal     = '011011';
constant bits(6) DebugHalt_Step_Exclusive  = '011111';
constant bits(6) DebugHalt_OSUnlockCatch   = '100011';
constant bits(6) DebugHalt_ResetCatch      = '100111';
constant bits(6) DebugHalt_Watchpoint      = '101011';
constant bits(6) DebugHalt_HaltInstruction = '101111';
constant bits(6) DebugHalt_SoftwareAccess  = '110011';
constant bits(6) DebugHalt_ExceptionCatch  = '110111';
constant bits(6) DebugHalt_Step_NoSyndrome = '111011';
// EDITR[] (external write)
// ========================
// Called on writes to debug register 0x084.
// In Debug state, the written value is issued to the PE as an instruction
// (A64, or T32 as two halfwords). Writes are ignored or flagged as an error
// in EDSCR when the PE is not ready to accept them.
EDITR[boolean memory_mapped] = bits(32) value
    if EDPRSR<6:5,0> != '001' then                      // Check DLK, OSLK and PU bits
        IMPLEMENTATION_DEFINED "generate error response";
        return;
    if EDSCR.ERR == '1' then return;                    // Error flag set: ignore write
    // The Software lock is OPTIONAL.
    if memory_mapped && EDLSR.SLK == '1' then return;   // Software lock locked: ignore write
    if !Halted() then return;                           // Non-debug state: ignore write
    if EDSCR.ITE == '0' || EDSCR.MA == '1' then
        EDSCR.ITO = '1'; EDSCR.ERR = '1';               // Overrun condition: block write
        return;
    // ITE indicates whether the processor is ready to accept another instruction; the processor
    // may support multiple outstanding instructions. Unlike the "InstrCompl" flag in [v7A] there
    // is no indication that the pipeline is empty (all instructions have completed). In this
    // pseudocode, the assumption is that only one instruction can be executed at a time,
    // meaning ITE acts like "InstrCompl".
    EDSCR.ITE = '0';
    if !UsingAArch32() then
        ExecuteA64(value);
    else
        ExecuteT32(value<15:0>/*hw1*/, value<31:16> /*hw2*/);
    EDSCR.ITE = '1';
return;DisableITRAndResumeInstructionPrefetch();
// DCPSInstruction()
// =================
// Operation of the DCPS instruction in Debug state

DCPSInstruction(bits(2) target_el)
    SynchronizeContext();

    // Determine the Exception level that handles the debug call; an invalid
    // target for the current state and configuration is UNDEFINED.
    case target_el of
        when EL1
            if PSTATE.EL == EL2 || (PSTATE.EL == EL3 && !UsingAArch32()) then handle_el = PSTATE.EL;
            elsif EL2Enabled() && HCR_EL2.TGE == '1' then UNDEFINED;
            else handle_el = EL1;
        when EL2
            if !HaveEL(EL2) then UNDEFINED;
            elsif PSTATE.EL == EL3 && !UsingAArch32() then handle_el = EL3;
            elsif !IsSecureEL2Enabled() && IsSecure() then UNDEFINED;
            else handle_el = EL2;
        when EL3
            if EDSCR.SDD == '1' || !HaveEL(EL3) then UNDEFINED;
            handle_el = EL3;
        otherwise
            Unreachable();

    from_secure = IsSecure();
    if ELUsingAArch32(handle_el) then
        if PSTATE.M == M32_Monitor then SCR.NS = '0';
        assert UsingAArch32();                      // Cannot move from AArch64 to AArch32
        case handle_el of
            when EL1
                AArch32.WriteMode(M32_Svc);
                if HavePANExt() && SCTLR.SPAN == '0' then
                    PSTATE.PAN = '1';
            when EL2  AArch32.WriteMode(M32_Hyp);
            when EL3
                AArch32.WriteMode(M32_Monitor);
                if HavePANExt() then
                    if !from_secure then
                        PSTATE.PAN = '0';
                    elsif SCTLR.SPAN == '0' then
                        PSTATE.PAN = '1';
        if handle_el == EL2 then
            ELR_hyp = bits(32) UNKNOWN;  HSR = bits(32) UNKNOWN;
        else
            LR = bits(32) UNKNOWN;
        SPSR[] = bits(32) UNKNOWN;
        PSTATE.E = SCTLR[].EE;
        DLR = bits(32) UNKNOWN;  DSPSR = bits(32) UNKNOWN;
    else                                            // Targeting AArch64
        if UsingAArch32() then
            AArch64.MaybeZeroRegisterUppers();
        MaybeZeroSVEUppers(target_el);
        PSTATE.nRW = '0';  PSTATE.SP = '1';  PSTATE.EL = handle_el;
        if (HavePANExt() && ((handle_el == EL1 && SCTLR_EL1.SPAN == '0') ||
                             (handle_el == EL2 && HCR_EL2.E2H == '1' &&
                              HCR_EL2.TGE == '1' && SCTLR_EL2.SPAN == '0'))) then
            PSTATE.PAN = '1';
        ELR[] = bits(64) UNKNOWN;  SPSR[] = bits(64) UNKNOWN;  ESR[] = bits(64) UNKNOWN;
        DLR_EL0 = bits(64) UNKNOWN;  DSPSR_EL0 = bits(64) UNKNOWN;
        if HaveUAOExt() then PSTATE.UAO = '0';
        if HaveMTEExt() then PSTATE.TCO = '1';

    UpdateEDSCRFields();                            // Update EDSCR PE state flags

    sync_errors = HaveIESB() && SCTLR[].IESB == '1';
    if HaveDoubleFaultExt() && !UsingAArch32() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
    // SCTLR[].IESB might be ignored in Debug state.
    if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then
        sync_errors = FALSE;
    if sync_errors then
        SynchronizeErrors();
    return;
// DRPSInstruction()
// =================
// Operation of the A64 DRPS and T32 ERET instructions in Debug state

DRPSInstruction()
    SynchronizeContext();

    sync_errors = HaveIESB() && SCTLR[].IESB == '1';
    if HaveDoubleFaultExt() && !UsingAArch32() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
    // SCTLR[].IESB might be ignored in Debug state.
    if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then
        sync_errors = FALSE;
    if sync_errors then
        SynchronizeErrors();

    bits(64) spsr = SPSR[];
    SetPSTATEFromPSR(spsr);

    // PSTATE.{N,Z,C,V,Q,GE,SS,D,A,I,F} are not observable and ignored in Debug state, so
    // behave as if UNKNOWN.
    if UsingAArch32() then
        PSTATE.<N,Z,C,V,Q,GE,SS,A,I,F> = bits(13) UNKNOWN;
        // In AArch32, all instructions are T32 and unconditional.
        PSTATE.IT = '00000000';  PSTATE.T = '1';        // PSTATE.J is RES0
        DLR = bits(32) UNKNOWN;  DSPSR = bits(32) UNKNOWN;
    else
        PSTATE.<N,Z,C,V,SS,D,A,I,F> = bits(9) UNKNOWN;
        DLR_EL0 = bits(64) UNKNOWN;  DSPSR_EL0 = bits(64) UNKNOWN;

    UpdateEDSCRFields();                                // Update EDSCR PE state flags
    return;
constant bits(6)// ExitDebugState()
// ================ DebugHalt_Breakpoint = '000111';
constant bits(6)ExitDebugState()
assert DebugHalt_EDBGRQ = '010011';
constant bits(6)(); DebugHalt_Step_Normal = '011011';
constant bits(6)();
// Although EDSCR.STATUS signals that the PE is restarting, debuggers must use EDPRSR.SDR to
// detect that the PE has restarted.
EDSCR.STATUS = '000001'; // Signal restarting
EDESR<2:0> = '000'; // Clear any pending Halting debug events
bits(64) new_pc;
bits(64) spsr;
if DebugHalt_Step_Exclusive = '011111';
constant bits(6)() then
new_pc = DebugHalt_OSUnlockCatch = '100011';
constant bits(6)(DLR);
spsr = DebugHalt_ResetCatch = '100111';
constant bits(6)(DSPSR);
else
new_pc = DLR_EL0;
spsr = DSPSR_EL0;
// If this is an illegal return, SetPSTATEFromPSR() will set PSTATE.IL.
if DebugHalt_Watchpoint = '101011';
constant bits(6)() then DebugHalt_HaltInstruction = '101111';
constant bits(6)(spsr<31:0>); // Can update privileged bits, even at EL0
else DebugHalt_SoftwareAccess = '110011';
constant bits(6)(spsr); // Can update privileged bits, even at EL0
if DebugHalt_ExceptionCatch = '110111';
constant bits(6)() then
if (Unpredictable_RESTARTALIGNPC) then new_pc<0> = '0';
BranchTo(new_pc<31:0>, BranchType_DBGEXIT); // AArch32 branch
else
// If targeting AArch32 then possibly zero the 32 most significant bits of the target PC
if spsr<4> == '1' && ConstrainUnpredictableBool(Unpredictable_RESTARTZEROUPPERPC) then
new_pc<63:32> = Zeros();
BranchTo(new_pc, BranchType_DBGEXIT); // A type of branch that is never predicted
(EDSCR.STATUS,EDPRSR.SDR) = ('000010','1'); // Atomically signal restarted
UpdateEDSCRFields(); // Stop signalling PE state
DisableITRAndResumeInstructionPrefetchDebugHalt_Step_NoSyndrome = '111011';();
return;
// Halt()
// ======
// Enter Debug state, recording 'reason' in EDSCR.STATUS and the preferred
// restart address and saved PSTATE in DLR/DSPSR (or DLR_EL0/DSPSR_EL0).

Halt(bits(6) reason)
    CTI_SignalEvent(CrossTriggerIn_CrossHalt);  // Trigger other cores to halt

    bits(64) preferred_restart_address = ThisInstrAddr();
    bits(32) spsr_32;
    bits(64) spsr_64;
    if UsingAArch32() then
        spsr_32 = GetPSRFromPSTATE(DebugState);
    else
        spsr_64 = GetPSRFromPSTATE(DebugState);

    if (HaveBTIExt() &&
        !(reason IN {DebugHalt_Step_Normal, DebugHalt_Step_Exclusive, DebugHalt_Step_NoSyndrome,
                     DebugHalt_Breakpoint, DebugHalt_HaltInstruction}) &&
        ConstrainUnpredictableBool(Unpredictable_ZEROBTYPE)) then
        if UsingAArch32() then
            spsr_32<11:10> = '00';
        else
            spsr_64<11:10> = '00';

    if UsingAArch32() then
        DLR = preferred_restart_address<31:0>;
        DSPSR = spsr_32;
    else
        DLR_EL0 = preferred_restart_address;
        DSPSR_EL0 = spsr_64;

    EDSCR.ITE = '1';
    EDSCR.ITO = '0';
    if IsSecure() then
        EDSCR.SDD = '0';                        // If entered in Secure state, allow debug
    elsif HaveEL(EL3) then
        EDSCR.SDD = if ExternalSecureInvasiveDebugEnabled() then '0' else '1';
    else
        assert EDSCR.SDD == '1';                // Otherwise EDSCR.SDD is RES1
    EDSCR.MA = '0';

    // In Debug state:
    // * PSTATE.{SS,SSBS,D,A,I,F} are not observable and ignored so behave-as-if UNKNOWN.
    // * PSTATE.{N,Z,C,V,Q,GE,E,M,nRW,EL,SP,DIT} are also not observable, but since these
    //   are not changed on exception entry, this function also leaves them unchanged.
    // * PSTATE.{IT,T} are ignored.
    // * PSTATE.IL is ignored and behave-as-if 0.
    // * PSTATE.{UAO,PAN} are observable and not changed on entry into Debug state.
    if UsingAArch32() then
        PSTATE.<IT,SS,SSBS,A,I,F,T> = bits(14) UNKNOWN;
    else
        PSTATE.<SS,SSBS,D,A,I,F> = bits(6) UNKNOWN;
    PSTATE.BTYPE = '00';
    PSTATE.IL = '0';

    StopInstructionPrefetchAndEnableITR();
    EDSCR.STATUS = reason;                      // Signal entered Debug state
    UpdateEDSCRFields();                        // Update EDSCR PE state flags.
    return;
// Execute an A64 instruction in Debug state.
ExecuteA64(bits(32) instr);

// HaltOnBreakpointOrWatchpoint()
// ==============================
// Returns TRUE if the Breakpoint and Watchpoint debug events should be considered for Debug
// state entry, FALSE if they should be considered for a debug exception.

boolean HaltOnBreakpointOrWatchpoint()
    return HaltingAllowed() && EDSCR.HDE == '1' && OSLSR_EL1.OSLK == '0';
// Execute a T32 instruction in Debug state.
ExecuteT32(bits(16) hw1, bits(16) hw2);

// Halted()
// ========
// Returns TRUE when the PE is in Debug state (EDSCR.STATUS is neither
// "restarting" nor "not halted").

boolean Halted()
    return !(EDSCR.STATUS IN {'000001', '000010'});     // Halted
// ExitDebugState()
// ================
// Leave Debug state: restore PSTATE from DSPSR/DSPSR_EL0 and branch to the
// restart address held in DLR/DLR_EL0.

ExitDebugState()
    assert Halted();
    SynchronizeContext();

    // Although EDSCR.STATUS signals that the PE is restarting, debuggers must use EDPRSR.SDR to
    // detect that the PE has restarted.
    EDSCR.STATUS = '000001';                            // Signal restarting
    EDESR<2:0> = '000';                                 // Clear any pending Halting debug events

    bits(64) new_pc;
    bits(64) spsr;
    if UsingAArch32() then
        new_pc = ZeroExtend(DLR);
        spsr = ZeroExtend(DSPSR);
    else
        new_pc = DLR_EL0;
        spsr = DSPSR_EL0;

    // If this is an illegal return, SetPSTATEFromPSR() will set PSTATE.IL.
    if UsingAArch32() then
        SetPSTATEFromPSR(spsr<31:0>);                   // Can update privileged bits, even at EL0
    else
        SetPSTATEFromPSR(spsr);                         // Can update privileged bits, even at EL0

    if UsingAArch32() then
        if ConstrainUnpredictableBool(Unpredictable_RESTARTALIGNPC) then new_pc<0> = '0';
        BranchTo(new_pc<31:0>, BranchType_DBGEXIT);     // AArch32 branch
    else
        // If targeting AArch32 then possibly zero the 32 most significant bits of the target PC
        if spsr<4> == '1' && ConstrainUnpredictableBool(Unpredictable_RESTARTZEROUPPERPC) then
            new_pc<63:32> = Zeros();
        BranchTo(new_pc, BranchType_DBGEXIT);           // A type of branch that is never predicted

    (EDSCR.STATUS,EDPRSR.SDR) = ('000010','1');         // Atomically signal restarted
    UpdateEDSCRFields();                                // Stop signalling PE state
    DisableITRAndResumeInstructionPrefetch();
    return;

// HaltingAllowed()
// ================
// Returns TRUE if halting is currently allowed, FALSE if halting is prohibited.

boolean HaltingAllowed()
    if Halted() || DoubleLockStatus() then
        return FALSE;
    elsif IsSecure() then
        return ExternalSecureInvasiveDebugEnabled();
    else
        return ExternalInvasiveDebugEnabled();
// Halt()
// ======// Restarting()
// ============
boolean
Halt(bits(6) reason)Restarting()
return EDSCR.STATUS == '000001'; // Restarting
CTI_SignalEvent(CrossTriggerIn_CrossHalt); // Trigger other cores to halt
bits(64) preferred_restart_address = ThisInstrAddr();
bits(32) spsr_32;
bits(64) spsr_64;
if UsingAArch32() then
spsr_32 = GetPSRFromPSTATE(DebugState);
else
spsr_64 = GetPSRFromPSTATE(DebugState);
if (HaveBTIExt() &&
!(reason IN {DebugHalt_Step_Normal, DebugHalt_Step_Exclusive, DebugHalt_Step_NoSyndrome,
DebugHalt_Breakpoint, DebugHalt_HaltInstruction}) &&
ConstrainUnpredictableBool(Unpredictable_ZEROBTYPE)) then
if UsingAArch32() then
spsr_32<11:10> = '00';
else
spsr_64<11:10> = '00';
if UsingAArch32() then
DLR = preferred_restart_address<31:0>;
DSPSR = spsr_32;
else
DLR_EL0 = preferred_restart_address;
DSPSR_EL0 = spsr_64;
EDSCR.ITE = '1';
EDSCR.ITO = '0';
if IsSecure() then
EDSCR.SDD = '0'; // If entered in Secure state, allow debug
elsif HaveEL(EL3) then
EDSCR.SDD = if ExternalSecureInvasiveDebugEnabled() then '0' else '1';
else
assert EDSCR.SDD == '1'; // Otherwise EDSCR.SDD is RES1
EDSCR.MA = '0';
// In Debug state:
// * PSTATE.{SS,SSBS,D,A,I,F} are not observable and ignored so behave-as-if UNKNOWN.
// * PSTATE.{N,Z,C,V,Q,GE,E,M,nRW,EL,SP,DIT} are also not observable, but since these
// are not changed on exception entry, this function also leaves them unchanged.
// * PSTATE.{IT,T} are ignored.
// * PSTATE.IL is ignored and behave-as-if 0.
// * PSTATE.BTYPE is ignored and behave-as-if 0.
// * PSTATE.TCO is set 1.
// * PSTATE.{UAO,PAN} are observable and not changed on entry into Debug state.
if UsingAArch32() then
PSTATE.<IT,SS,SSBS,A,I,F,T> = bits(14) UNKNOWN;
else
PSTATE.<SS,SSBS,D,A,I,F> = bits(6) UNKNOWN;
PSTATE.TCO = '1';
PSTATE.BTYPE = '00';
PSTATE.IL = '0';
StopInstructionPrefetchAndEnableITR();
EDSCR.STATUS = reason; // Signal entered Debug state
UpdateEDSCRFields(); // Update EDSCR PE state flags.
return;
// HaltOnBreakpointOrWatchpoint()
// ==============================
// Returns TRUE if the Breakpoint and Watchpoint debug events should be considered for Debug
// state entry, FALSE if they should be considered for a debug exception.

boolean HaltOnBreakpointOrWatchpoint()
    return HaltingAllowed() && EDSCR.HDE == '1' && OSLSR_EL1.OSLK == '0';
// Halted()
// ========
// Returns TRUE when the PE is in Debug state.

boolean Halted()
    return !(EDSCR.STATUS IN {'000001', '000010'});     // Halted

// UpdateEDSCRFields()
// ===================
// Update EDSCR PE state fields

UpdateEDSCRFields()
    if !Halted() then
        // Outside Debug state the EL/NS/RW fields read as a fixed pattern.
        EDSCR.EL = '00';
        EDSCR.NS = bit UNKNOWN;
        EDSCR.RW = '1111';
    else
        EDSCR.EL = PSTATE.EL;
        EDSCR.NS = if IsSecure() then '0' else '1';

        bits(4) RW;
        RW<1> = if ELUsingAArch32(EL1) then '0' else '1';
        if PSTATE.EL != EL0 then
            RW<0> = RW<1>;
        else
            RW<0> = if UsingAArch32() then '0' else '1';
        if !HaveEL(EL2) || (HaveEL(EL3) && SCR_GEN[].NS == '0' && !IsSecureEL2Enabled()) then
            RW<2> = RW<1>;
        else
            RW<2> = if ELUsingAArch32(EL2) then '0' else '1';
        if !HaveEL(EL3) then
            RW<3> = RW<2>;
        else
            RW<3> = if ELUsingAArch32(EL3) then '0' else '1';
        // The least-significant bits of EDSCR.RW are UNKNOWN if any higher EL is using AArch32.
        if RW<3> == '0' then RW<2:0> = bits(3) UNKNOWN;
        elsif RW<2> == '0' then RW<1:0> = bits(2) UNKNOWN;
        elsif RW<1> == '0' then RW<0> = bit UNKNOWN;
        EDSCR.RW = RW;
    return;
// HaltingAllowed()
// ================
// Returns TRUE if halting is currently allowed, FALSE if halting is prohibited.
boolean// CheckExceptionCatch()
// =====================
// Check whether an Exception Catch debug event is set on the current Exception level HaltingAllowed()
ifCheckExceptionCatch(boolean exception_entry)
// Called after an exception entry or exit, that is, such that IsSecure() and PSTATE.EL are correct
// for the exception target.
base = if HaltedIsSecure() ||() then 0 else 4;
if DoubleLockStatusHaltingAllowed() then
return FALSE;
elsif if IsSecureHaveExtendedECDebugEvents() then
return exception_exit = !exception_entry;
ctrl = EDECCR< ExternalSecureInvasiveDebugEnabledUInt();
else
return(PSTATE.EL) + base + 8>:EDECCR< (PSTATE.EL) + base>;
case ctrl of
when '00' halt = FALSE;
when '01' halt = TRUE;
when '10' halt = (exception_exit == TRUE);
when '11' halt = (exception_entry == TRUE);
else
halt = (EDECCR<UInt(PSTATE.EL) + base> == '1');
if halt then Halt(DebugHalt_ExceptionCatchExternalInvasiveDebugEnabledUInt(););
// Restarting()
// ============
// Returns TRUE while the PE is exiting Debug state.

boolean Restarting()
    return EDSCR.STATUS == '000001';                    // Restarting

// CheckHaltingStep()
// ==================
// Check whether EDESR.SS has been set by Halting Step

CheckHaltingStep()
    if HaltingAllowed() && EDESR.SS == '1' then
        // The STATUS code depends on how we arrived at the state where EDESR.SS == 1.
        if HaltingStep_DidNotStep() then
            Halt(DebugHalt_Step_NoSyndrome);
        elsif HaltingStep_SteppedEX() then
            Halt(DebugHalt_Step_Exclusive);
        else
            Halt(DebugHalt_Step_Normal);
// CheckOSUnlockCatch()
// ====================
// Called on unlocking the OS Lock to pend an OS Unlock Catch debug event

CheckOSUnlockCatch()
    if ((HaveDoPD() && CTIDEVCTL.OSUCE == '1') ||
        (!HaveDoPD() && EDECR.OSUCE == '1')) then
        // Pend the event; it is taken later by CheckPendingOSUnlockCatch().
        if !Halted() then EDESR.OSUC = '1';
// UpdateEDSCRFields()
// ===================
// Update EDSCR PE state fields// CheckPendingOSUnlockCatch()
// ===========================
// Check whether EDESR.OSUC has been set by an OS Unlock Catch debug event
UpdateEDSCRFields()
if !CheckPendingOSUnlockCatch()
ifHaltedHaltingAllowed() then
EDSCR.EL = '00';
EDSCR.NS = bit UNKNOWN;
EDSCR.RW = '1111';
else
EDSCR.EL = PSTATE.EL;
EDSCR.NS = if() && EDESR.OSUC == '1' then IsSecureHalt() then '0' else '1';
bits(4) RW;
RW<1> = if( ELUsingAArch32DebugHalt_OSUnlockCatch(EL1) then '0' else '1';
if PSTATE.EL != EL0 then
RW<0> = RW<1>;
else
RW<0> = if UsingAArch32() then '0' else '1';
if !HaveEL(EL2) || (HaveEL(EL3) && SCR_GEN[].NS == '0' && !IsSecureEL2Enabled()) then
RW<2> = RW<1>;
else
RW<2> = if ELUsingAArch32(EL2) then '0' else '1';
if !HaveEL(EL3) then
RW<3> = RW<2>;
else
RW<3> = if ELUsingAArch32(EL3) then '0' else '1';
// The least-significant bits of EDSCR.RW are UNKNOWN if any higher EL is using AArch32.
if RW<3> == '0' then RW<2:0> = bits(3) UNKNOWN;
elsif RW<2> == '0' then RW<1:0> = bits(2) UNKNOWN;
elsif RW<1> == '0' then RW<0> = bit UNKNOWN;
EDSCR.RW = RW;
return;);
// CheckExceptionCatch()
// =====================
// Check whether an Exception Catch debug event is set on the current Exception level

CheckExceptionCatch(boolean exception_entry)
    // Called after an exception entry or exit, that is, such that IsSecure() and PSTATE.EL are
    // correct for the exception target.
    base = if IsSecure() then 0 else 4;
    if HaltingAllowed() then
        if HaveExtendedECDebugEvents() then
            exception_exit = !exception_entry;
            ctrl = EDECCR<UInt(PSTATE.EL) + base + 8>:EDECCR<UInt(PSTATE.EL) + base>;
            case ctrl of
                when '00'  halt = FALSE;
                when '01'  halt = TRUE;
                when '10'  halt = (exception_exit == TRUE);
                when '11'  halt = (exception_entry == TRUE);
        else
            halt = (EDECCR<UInt(PSTATE.EL) + base> == '1');
        if halt then Halt(DebugHalt_ExceptionCatch);

// CheckPendingResetCatch()
// ========================
// Check whether EDESR.RC has been set by a Reset Catch debug event

CheckPendingResetCatch()
    if HaltingAllowed() && EDESR.RC == '1' then
        Halt(DebugHalt_ResetCatch);
// CheckHaltingStep()
// ==================
// Check whether EDESR.SS has been set by Halting Step

CheckHaltingStep()
    if HaltingAllowed() && EDESR.SS == '1' then
        // The STATUS code depends on how we arrived at the state where EDESR.SS == 1.
        if HaltingStep_DidNotStep() then
            Halt(DebugHalt_Step_NoSyndrome);
        elsif HaltingStep_SteppedEX() then
            Halt(DebugHalt_Step_Exclusive);
        else
            Halt(DebugHalt_Step_Normal);

// CheckResetCatch()
// =================
// Called after reset

CheckResetCatch()
    if (HaveDoPD() && CTIDEVCTL.RCE == '1') || (!HaveDoPD() && EDECR.RCE == '1') then
        EDESR.RC = '1';
        // If halting is allowed then halt immediately
        if HaltingAllowed() then Halt(DebugHalt_ResetCatch);
// CheckSoftwareAccessToDebugRegisters()
// =====================================
// Check for access to Breakpoint and Watchpoint registers.

CheckSoftwareAccessToDebugRegisters()
    os_lock = (if ELUsingAArch32(EL1) then DBGOSLSR.OSLK else OSLSR_EL1.OSLK);
    if HaltingAllowed() && EDSCR.TDA == '1' && os_lock == '0' then
        Halt(DebugHalt_SoftwareAccess);
// CheckPendingOSUnlockCatch()
// ===========================
// Check whether EDESR.OSUC has been set by an OS Unlock Catch debug event

CheckPendingOSUnlockCatch()
    if HaltingAllowed() && EDESR.OSUC == '1' then
        Halt(DebugHalt_OSUnlockCatch);

// ExternalDebugRequest()
// ======================

ExternalDebugRequest()
    if HaltingAllowed() then
        Halt(DebugHalt_EDBGRQ);
    // Otherwise the CTI continues to assert the debug request until it is taken.
// CheckPendingResetCatch()
// ========================
// Check whether EDESR.RC has been set by a Reset Catch debug event// Returns TRUE if the previously executed instruction was executed in the inactive state, that is,
// if it was not itself stepped.
boolean
CheckPendingResetCatch()
ifHaltingStep_DidNotStep(); HaltingAllowed() && EDESR.RC == '1' then
Halt(DebugHalt_ResetCatch);
// CheckResetCatch()
// =================
// Called after reset// Returns TRUE if the previously executed instruction was a Load-Exclusive class instruction
// executed in the active-not-pending state.
boolean
CheckResetCatch()
if (HaltingStep_SteppedEX();HaveDoPD() && CTIDEVCTL.RCE == '1') || (!HaveDoPD() && EDECR.RCE == '1') then
EDESR.RC = '1';
// If halting is allowed then halt immediately
if HaltingAllowed() then Halt(DebugHalt_ResetCatch);
// RunHaltingStep()
// ================
// Advance the Halting Step state machine after an instruction has executed.

RunHaltingStep(boolean exception_generated, bits(2) exception_target, boolean syscall,
               boolean reset)
    // "exception_generated" is TRUE if the previous instruction generated a synchronous exception
    // or was cancelled by an asynchronous exception.
    //
    // if "exception_generated" is TRUE then "exception_target" is the target of the exception, and
    // "syscall" is TRUE if the exception is a synchronous exception where the preferred return
    // address is the instruction following that which generated the exception.
    //
    // "reset" is TRUE if exiting reset state into the highest EL.
    if reset then assert !Halted();                     // Cannot come out of reset halted
    active = EDECR.SS == '1' && !Halted();

    if active && reset then                             // Coming out of reset with EDECR.SS set
        EDESR.SS = '1';
    elsif active && HaltingAllowed() then
        if exception_generated && exception_target == EL3 then
            advance = syscall || ExternalSecureInvasiveDebugEnabled();
        else
            advance = TRUE;
        if advance then EDESR.SS = '1';
    return;
// ExternalDebugInterruptsDisabled()
// =================================
// Determine whether EDSCR disables interrupts routed to 'target'

boolean ExternalDebugInterruptsDisabled(bits(2) target)
    if Havev8p4Debug() then
        // From Armv8.4, EDSCR.INTdis is a single control bit.
        if target == EL3 then
            int_dis = (EDSCR.INTdis[0] == '1' && ExternalSecureInvasiveDebugEnabled());
        else
            int_dis = (EDSCR.INTdis[0] == '1');
    else
        case target of
            when EL3
                int_dis = (EDSCR.INTdis == '11' && ExternalSecureInvasiveDebugEnabled());
            when EL2
                int_dis = (EDSCR.INTdis == '1x' && ExternalInvasiveDebugEnabled());
            when EL1
                if IsSecure() then
                    int_dis = (EDSCR.INTdis == '1x' && ExternalSecureInvasiveDebugEnabled());
                else
                    int_dis = (EDSCR.INTdis != '00' && ExternalInvasiveDebugEnabled());
    return int_dis;
// Returns TRUE if the previously executed instruction was executed in the inactive state, that is,
// if it was not itself stepped.
boolean HaltingStep_DidNotStep();

// Returns TRUE if the previously executed instruction was a Load-Exclusive class instruction
// executed in the active-not-pending state.
boolean HaltingStep_SteppedEX();

enumeration InterruptID {InterruptID_PMUIRQ, InterruptID_COMMIRQ, InterruptID_CTIIRQ,
                         InterruptID_COMMRX, InterruptID_COMMTX};

// Set a level-sensitive interrupt to the specified level.
SetInterruptRequestLevel(InterruptID id, signal level);
// RunHaltingStep()
// CreatePCSample()
// ================
RunHaltingStep(boolean exception_generated, bits(2) exception_target, boolean syscall,
boolean reset)
// "exception_generated" is TRUE if the previous instruction generated a synchronous exception
// or was cancelled by an asynchronous exception.
//
// if "exception_generated" is TRUE then "exception_target" is the target of the exception, and
// "syscall" is TRUE if the exception is a synchronous exception where the preferred return
// address is the instruction following that which generated the exception.
//
// "reset" is TRUE if exiting reset state into the highest EL.
CreatePCSample()
// In a simple sequential execution of the program, CreatePCSample is executed each time the PE
// executes an instruction that can be sampled. An implementation is not constrained such that
// reads of EDPCSRlo return the current values of PC, etc.
if reset then assert ! pc_sample.valid =HaltedExternalNoninvasiveDebugAllowed(); // Cannot come out of reset halted
active = EDECR.SS == '1' && !() && !Halted();
if active && reset then // Coming out of reset with EDECR.SS set
EDESR.SS = '1';
elsif active && pc_sample.pc = HaltingAllowedThisInstrAddr() then
if exception_generated && exception_target ==();
pc_sample.el = PSTATE.EL;
pc_sample.rw = if EL3UsingAArch32 then
advance = syscall ||() then '0' else '1';
pc_sample.ns = if () then '0' else '1';
pc_sample.contextidr = if ELUsingAArch32(EL1) then CONTEXTIDR else CONTEXTIDR_EL1<31:0>;
pc_sample.has_el2 = EL2Enabled();
if EL2Enabled() then
if ELUsingAArch32(EL2) then
pc_sample.vmid = ZeroExtend(VTTBR.VMID, 16);
elsif !Have16bitVMID() || VTCR_EL2.VS == '0' then
pc_sample.vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16);
else
pc_sample.vmid = VTTBR_EL2.VMID;
if (HaveVirtHostExt() || HaveV82Debug()) && !ELUsingAArch32(EL2) then
pc_sample.contextidr_el2 = CONTEXTIDR_EL2<31:0>;
else
pc_sample.contextidr_el2 = bits(32) UNKNOWN;
pc_sample.el0h = PSTATE.EL == EL0 && IsInHostExternalSecureInvasiveDebugEnabledIsSecure();
else
advance = TRUE;
if advance then EDESR.SS = '1';
return;
// EDPCSRlo[] (read)
// =================
// Read of the PC Sample Register low word; also latches the high word and
// context/virtual-context sample registers as a side effect when unlocked.

bits(32) EDPCSRlo[boolean memory_mapped]
    if EDPRSR<6:5,0> != '001' then                      // Check DLK, OSLK and PU bits
        IMPLEMENTATION_DEFINED "generate error response";
        return bits(32) UNKNOWN;

    // The Software lock is OPTIONAL.
    update = !memory_mapped || EDLSR.SLK == '0';        // Software locked: no side-effects

    if pc_sample.valid then
        sample = pc_sample.pc<31:0>;
        if update then
            if HaveVirtHostExt() && EDSCR.SC2 == '1' then
                EDPCSRhi.PC = (if pc_sample.rw == '0' then Zeros(24) else pc_sample.pc<55:32>);
                EDPCSRhi.EL = pc_sample.el;
                EDPCSRhi.NS = pc_sample.ns;
            else
                EDPCSRhi = (if pc_sample.rw == '0' then Zeros(32) else pc_sample.pc<63:32>);
            EDCIDSR = pc_sample.contextidr;
            if (HaveVirtHostExt() || HaveV82Debug()) && EDSCR.SC2 == '1' then
                EDVIDSR = (if HaveEL(EL2) && pc_sample.ns == '1' then pc_sample.contextidr_el2
                           else bits(32) UNKNOWN);
            else
                if HaveEL(EL2) && pc_sample.ns == '1' && pc_sample.el IN {EL1,EL0} then
                    EDVIDSR.VMID = pc_sample.vmid;
                else
                    EDVIDSR.VMID = Zeros();
                EDVIDSR.NS = pc_sample.ns;
                EDVIDSR.E2 = (if pc_sample.el == EL2 then '1' else '0');
                EDVIDSR.E3 = (if pc_sample.el == EL3 then '1' else '0') AND pc_sample.rw;
                // The conditions for setting HV are not specified if PCSRhi is zero.
                // An example implementation may be "pc_sample.rw".
                EDVIDSR.HV = (if !IsZero(EDPCSRhi) then '1' else bit IMPLEMENTATION_DEFINED "0 or 1");
    else
        sample = Ones(32);
        if update then
            EDPCSRhi = bits(32) UNKNOWN;
            EDCIDSR = bits(32) UNKNOWN;
            EDVIDSR = bits(32) UNKNOWN;

    return sample;
// Most recent PC sample, captured by CreatePCSample() and reported through
// the EDPCSRlo/PMPCSR sample register reads.
type PCSample is (
    boolean valid,
    bits(64) pc,
    bits(2) el,
    bit rw,
    bit ns,
    boolean has_el2,
    bits(32) contextidr,
    bits(32) contextidr_el2,
    boolean el0h,
    bits(16) vmid
)

PCSample pc_sample;
// PMPCSR[] (read)
// ===============
// Read of the PMU PC Sample Register; also latches the EL/NS fields and the
// context/VMID sample registers as a side effect when unlocked.

bits(32) PMPCSR[boolean memory_mapped]
    if EDPRSR<6:5,0> != '001' then                      // Check DLK, OSLK and PU bits
        IMPLEMENTATION_DEFINED "generate error response";
        return bits(32) UNKNOWN;

    // The Software lock is OPTIONAL.
    update = !memory_mapped || PMLSR.SLK == '0';        // Software locked: no side-effects

    if pc_sample.valid then
        sample = pc_sample.pc<31:0>;
        if update then
            PMPCSR<55:32> = (if pc_sample.rw == '0' then Zeros(24) else pc_sample.pc<55:32>);
            PMPCSR.EL = pc_sample.el;
            PMPCSR.NS = pc_sample.ns;
            PMCID1SR = pc_sample.contextidr;
            PMCID2SR = if pc_sample.has_el2 then pc_sample.contextidr_el2 else bits(32) UNKNOWN;
            PMVIDSR.VMID = (if pc_sample.has_el2 && pc_sample.el IN {EL1,EL0} && !pc_sample.el0h
                            then pc_sample.vmid else bits(16) UNKNOWN);
    else
        sample = Ones(32);
        if update then
            PMPCSR<55:32> = bits(24) UNKNOWN;
            PMPCSR.EL = bits(2) UNKNOWN;
            PMPCSR.NS = bit UNKNOWN;
            PMCID1SR = bits(32) UNKNOWN;
            PMCID2SR = bits(32) UNKNOWN;
            PMVIDSR.VMID = bits(16) UNKNOWN;

    return sample;
// GetNumEventCounters()
// =====================
// Returns the number of event counters implemented. This is indicated to software at the
// highest Exception level by PMCR.N in AArch32 state, and PMCR_EL0.N in AArch64 state.

integer GetNumEventCounters()
    return integer IMPLEMENTATION_DEFINED "Number of event counters";

// CheckSoftwareStep()
// ===================
// Take a Software Step exception if in the active-pending state

CheckSoftwareStep()
    // Other self-hosted debug functions will call AArch32.GenerateDebugExceptions() if called from
    // AArch32 state. However, because Software Step is only active when the debug target Exception
    // level is using AArch64, CheckSoftwareStep only calls AArch64.GenerateDebugExceptions().
    step_enabled = (!ELUsingAArch32(DebugTarget()) &&
                    AArch64.GenerateDebugExceptions() && MDSCR_EL1.SS == '1');
    if step_enabled && PSTATE.SS == '0' then
        AArch64.SoftwareStepException();
// CreatePCSample()
// ================

CreatePCSample()
    // In a simple sequential execution of the program, CreatePCSample is executed each time the PE
    // executes an instruction that can be sampled. An implementation is not constrained such that
    // reads of EDPCSRlo return the current values of PC, etc.
    pc_sample.valid = ExternalNoninvasiveDebugAllowed() && !Halted();
    pc_sample.pc = ThisInstrAddr();
    pc_sample.el = PSTATE.EL;
    pc_sample.rw = if UsingAArch32() then '0' else '1';
    pc_sample.ns = if IsSecure() then '0' else '1';
    pc_sample.contextidr = if ELUsingAArch32(EL1) then CONTEXTIDR else CONTEXTIDR_EL1<31:0>;
    pc_sample.has_el2 = EL2Enabled();

    if EL2Enabled() then
        if ELUsingAArch32(EL2) then
            pc_sample.vmid = ZeroExtend(VTTBR.VMID, 16);
        elsif !Have16bitVMID() || VTCR_EL2.VS == '0' then
            pc_sample.vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16);
        else
            pc_sample.vmid = VTTBR_EL2.VMID;
        if (HaveVirtHostExt() || HaveV82Debug()) && !ELUsingAArch32(EL2) then
            pc_sample.contextidr_el2 = CONTEXTIDR_EL2<31:0>;
        else
            pc_sample.contextidr_el2 = bits(32) UNKNOWN;
        pc_sample.el0h = PSTATE.EL == EL0 && IsInHost();
    return;

// DebugExceptionReturnSS()
// ========================
// Returns value to write to PSTATE.SS on an exception return or Debug state exit.

bit DebugExceptionReturnSS(bits(N) spsr)
    if UsingAArch32() then
        assert N == 32;
    else
        assert N == 64;
    assert Halted() || Restarting() || PSTATE.EL != EL0;

    if Restarting() then
        enabled_at_source = FALSE;
    elsif UsingAArch32() then
        enabled_at_source = AArch32.GenerateDebugExceptions();
    else
        enabled_at_source = AArch64.GenerateDebugExceptions();

    if IllegalExceptionReturn(spsr) then
        dest = PSTATE.EL;
    else
        (valid, dest) = ELFromSPSR(spsr);  assert valid;

    dest_is_secure = IsSecureBelowEL3() || dest == EL3;
    dest_using_32 = (if dest == EL0 then spsr<4> == '1' else ELUsingAArch32(dest));
    if dest_using_32 then
        enabled_at_dest = AArch32.GenerateDebugExceptionsFrom(dest, dest_is_secure);
    else
        mask = spsr<9>;
        enabled_at_dest = AArch64.GenerateDebugExceptionsFrom(dest, dest_is_secure, mask);

    ELd = DebugTargetFrom(dest_is_secure);
    if !ELUsingAArch32(ELd) && MDSCR_EL1.SS == '1' && !enabled_at_source && enabled_at_dest then
        SS_bit = spsr<21>;
    else
        SS_bit = '0';
    return SS_bit;
// EDPCSRlo[] (read)
// =================
bits(32)// SSAdvance()
// ===========
// Advance the Software Step state machine. EDPCSRlo[boolean memory_mapped]
SSAdvance()
if EDPRSR<6:5,0> != '001' then // Check DLK, OSLK and PU bits
IMPLEMENTATION_DEFINED "generate error response";
return bits(32) UNKNOWN;
// The Software lock is OPTIONAL.
update = !memory_mapped || EDLSR.SLK == '0'; // Software locked: no side-effects
if pc_sample.valid then
sample = pc_sample.pc<31:0>;
if update then
if // A simpler implementation of this function just clears PSTATE.SS to zero regardless of the
// current Software Step state machine. However, this check is made to illustrate that the
// processor only needs to consider advancing the state machine from the active-not-pending
// state.
target = HaveVirtHostExtDebugTarget() && EDSCR.SC2 == '1' then
EDPCSRhi.PC = (if pc_sample.rw == '0' then();
step_enabled = ! ZerosELUsingAArch32(24) else pc_sample.pc<55:32>);
EDPCSRhi.EL = pc_sample.el;
EDPCSRhi.NS = pc_sample.ns;
else
EDPCSRhi = (if pc_sample.rw == '0' then Zeros(32) else pc_sample.pc<63:32>);
EDCIDSR = pc_sample.contextidr;
if (HaveVirtHostExt() || HaveV82Debug()) && EDSCR.SC2 == '1' then
EDVIDSR = (if HaveEL(EL2) && pc_sample.ns == '1' then pc_sample.contextidr_el2
else bits(32) UNKNOWN);
else
if HaveEL(EL2) && pc_sample.ns == '1' && pc_sample.el IN {EL1,EL0} then
EDVIDSR.VMID = pc_sample.vmid;
else
EDVIDSR.VMID = Zeros();
EDVIDSR.NS = pc_sample.ns;
EDVIDSR.E2 = (if pc_sample.el == EL2 then '1' else '0');
EDVIDSR.E3 = (if pc_sample.el == EL3 then '1' else '0') AND pc_sample.rw;
// The conditions for setting HV are not specified if PCSRhi is zero.
// An example implementation may be "pc_sample.rw".
EDVIDSR.HV = (if !IsZero(EDPCSRhi) then '1' else bit IMPLEMENTATION_DEFINED "0 or 1");
else
sample = Ones(32);
if update then
EDPCSRhi = bits(32) UNKNOWN;
EDCIDSR = bits(32) UNKNOWN;
EDVIDSR = bits(32) UNKNOWN;
(target) && MDSCR_EL1.SS == '1';
active_not_pending = step_enabled && PSTATE.SS == '1';
return sample; if active_not_pending then PSTATE.SS = '0';
return;
type// Returns TRUE if the previously executed instruction was executed in the inactive state, that is,
// if it was not itself stepped.
// Might return TRUE or FALSE if the previously executed instruction was an ISB or ERET executed
// in the active-not-pending state, or if another exception was taken before the Software Step exception.
// Returns FALSE otherwise, indicating that the previously executed instruction was executed in the
// active-not-pending state, that is, the instruction was stepped.
boolean PCSample is (
boolean valid,
bits(64) pc,
bits(2) el,
bit rw,
bit ns,
boolean has_el2,
bits(32) contextidr,
bits(32) contextidr_el2,
boolean el0h,
bits(16) vmid
)SoftwareStep_DidNotStep();
PCSample pc_sample;
// PMPCSR[] (read)
// ===============
bits(32)// Returns a value that describes the previously executed instruction. The result is valid only if
// SoftwareStep_DidNotStep() returns FALSE.
// Might return TRUE or FALSE if the instruction was an AArch32 LDREX or LDAEX that failed its condition code test.
// Otherwise returns TRUE if the instruction was a Load-Exclusive class instruction, and FALSE if the
// instruction was not a Load-Exclusive class instruction.
boolean PMPCSR[boolean memory_mapped]
if EDPRSR<6:5,0> != '001' then // Check DLK, OSLK and PU bits
IMPLEMENTATION_DEFINED "generate error response";
return bits(32) UNKNOWN;
// The Software lock is OPTIONAL.
update = !memory_mapped || PMLSR.SLK == '0'; // Software locked: no side-effects
if pc_sample.valid then
sample = pc_sample.pc<31:0>;
if update then
PMPCSR<55:32> = (if pc_sample.rw == '0' thenSoftwareStep_SteppedEX(); Zeros(24) else pc_sample.pc<55:32>);
PMPCSR.EL = pc_sample.el;
PMPCSR.NS = pc_sample.ns;
PMCID1SR = pc_sample.contextidr;
PMCID2SR = if pc_sample.has_el2 then pc_sample.contextidr_el2 else bits(32) UNKNOWN;
PMVIDSR.VMID = (if pc_sample.has_el2 && pc_sample.el IN {EL1,EL0} && !pc_sample.el0h
then pc_sample.vmid else bits(16) UNKNOWN);
else
sample = Ones(32);
if update then
PMPCSR<55:32> = bits(24) UNKNOWN;
PMPCSR.EL = bits(2) UNKNOWN;
PMPCSR.NS = bit UNKNOWN;
PMCID1SR = bits(32) UNKNOWN;
PMCID2SR = bits(32) UNKNOWN;
PMVIDSR.VMID = bits(16) UNKNOWN;
return sample;
// CheckSoftwareStep()
// ConditionSyndrome()
// ===================
// Take a Software Step exception if in the active-pending state// Return CV and COND fields of instruction syndrome
bits(5)
CheckSoftwareStep()
ConditionSyndrome()
// Other self-hosted debug functions will call AArch32.GenerateDebugExceptions() if called from
// AArch32 state. However, because Software Step is only active when the debug target Exception
// level is using AArch64, CheckSoftwareStep only calls AArch64.GenerateDebugExceptions().
step_enabled = ! bits(5) syndrome;
ifELUsingAArch32UsingAArch32(() then
cond =DebugTargetAArch32.CurrentCond()) &&();
if PSTATE.T == '0' then // A32
syndrome<4> = '1';
// A conditional A32 instruction that is known to pass its condition code check
// can be presented either with COND set to 0xE, the value for unconditional, or
// the COND value held in the instruction.
if AArch64.GenerateDebugExceptionsConditionHolds() && MDSCR_EL1.SS == '1';
if step_enabled && PSTATE.SS == '0' then(cond) &&
(Unpredictable_ESRCONDPASSAArch64.SoftwareStepExceptionConstrainUnpredictableBool();) then
syndrome<3:0> = '1110';
else
syndrome<3:0> = cond;
else // T32
// When a T32 instruction is trapped, it is IMPLEMENTATION DEFINED whether:
// * CV set to 0 and COND is set to an UNKNOWN value
// * CV set to 1 and COND is set to the condition code for the condition that
// applied to the instruction.
if boolean IMPLEMENTATION_DEFINED "Condition valid for trapped T32" then
syndrome<4> = '1';
syndrome<3:0> = cond;
else
syndrome<4> = '0';
syndrome<3:0> = bits(4) UNKNOWN;
else
syndrome<4> = '1';
syndrome<3:0> = '1110';
return syndrome;
// DebugExceptionReturnSS()
// ========================
// Returns value to write to PSTATE.SS on an exception return or Debug state exit.
bitenumeration DebugExceptionReturnSS(bits(N) spsr)
ifException { UsingAArch32() then
assert N == 32;
else
assert N == 64;
assertException_Uncategorized, // Uncategorized or unknown reason Halted() ||Exception_WFxTrap, // Trapped WFI or WFE instruction Restarting() || PSTATE.EL !=Exception_CP15RTTrap, // Trapped AArch32 MCR or MRC access to CP15 EL0;
ifException_CP15RRTTrap, // Trapped AArch32 MCRR or MRRC access to CP15 Restarting() then
enabled_at_source = FALSE;
elsifException_CP14RTTrap, // Trapped AArch32 MCR or MRC access to CP14 UsingAArch32() then
enabled_at_source =Exception_CP14DTTrap, // Trapped AArch32 LDC or STC access to CP14 AArch32.GenerateDebugExceptions();
else
enabled_at_source =Exception_AdvSIMDFPAccessTrap, // HCPTR-trapped access to SIMD or FP AArch64.GenerateDebugExceptions();
ifException_FPIDTrap, // Trapped access to SIMD or FP ID register IllegalExceptionReturn(spsr) then
dest = PSTATE.EL;
else
(valid, dest) =Exception_LDST64BTrap, // Trapped access to ST64BV, ST64BV0, ST64B and LD64B
// Trapped BXJ instruction not supported in Armv8 ELFromSPSR(spsr); assert valid;
dest_is_secure =Exception_PACTrap, // Trapped invalid PAC use IsSecureBelowEL3() || dest ==Exception_CP14RRTTrap, // Trapped MRRC access to CP14 from AArch32 EL3;
dest_using_32 = (if dest ==Exception_IllegalState, // Illegal Execution state EL0 then spsr<4> == '1' elseException_SupervisorCall, // Supervisor Call ELUsingAArch32(dest));
if dest_using_32 then
enabled_at_dest =Exception_HypervisorCall, // Hypervisor Call AArch32.GenerateDebugExceptionsFrom(dest, dest_is_secure);
else
mask = spsr<9>;
enabled_at_dest =Exception_MonitorCall, // Monitor Call or Trapped SMC instruction AArch64.GenerateDebugExceptionsFrom(dest, dest_is_secure, mask);
ELd =Exception_SystemRegisterTrap, // Trapped MRS or MSR system register access DebugTargetFrom(dest_is_secure);
if !Exception_ERetTrap, // Trapped invalid ERET useException_InstructionAbort, // Instruction Abort or Prefetch Abort
Exception_PCAlignment, // PC alignment fault
Exception_DataAbort, // Data Abort
Exception_NV2DataAbort, // Data abort at EL1 reported as being from EL2
Exception_PACFail, // PAC Authentication failure
Exception_SPAlignment, // SP alignment fault
Exception_FPTrappedException, // IEEE trapped FP exception
Exception_SError, // SError interrupt
Exception_Breakpoint, // (Hardware) Breakpoint
Exception_SoftwareStep, // Software Step
Exception_Watchpoint, // Watchpoint
Exception_NV2Watchpoint, // Watchpoint at EL1 reported as being from EL2
Exception_SoftwareBreakpoint, // Software Breakpoint Instruction
Exception_VectorCatch, // AArch32 Vector Catch
Exception_IRQ, // IRQ interrupt
Exception_SVEAccessTrap, // HCPTR trapped access to SVE
Exception_BranchTarget, // Branch Target Identification
ELUsingAArch32(ELd) && MDSCR_EL1.SS == '1' && !enabled_at_source && enabled_at_dest then
SS_bit = spsr<21>;
else
SS_bit = '0';
return SS_bit;Exception_FIQ}; // FIQ interrupt
// SSAdvance()
// ===========
// Advance the Software Step state machine.type
SSAdvance()
// A simpler implementation of this function just clears PSTATE.SS to zero regardless of the
// current Software Step state machine. However, this check is made to illustrate that the
// processor only needs to consider advancing the state machine from the active-not-pending
// state.
target =ExceptionRecord is ( DebugTargetException();
step_enabled = !ELUsingAArch32(target) && MDSCR_EL1.SS == '1';
active_not_pending = step_enabled && PSTATE.SS == '1';
if active_not_pending then PSTATE.SS = '0';
return;exceptype, // Exception class
bits(25) syndrome, // Syndrome record
bits(5) syndrome2, // ST64BV(0) return value register specifier
bits(64) vaddress, // Virtual fault address
boolean ipavalid, // Physical fault address for second stage faults is valid
bits(1) NS, // Physical fault address for second stage faults is Non-secure or secure
bits(52) ipaddress) // Physical fault address for second stage faults
// Returns TRUE if the previously executed instruction was executed in the inactive state, that is,
// if it was not itself stepped.
// Might return TRUE or FALSE if the previously executed instruction was an ISB or ERET executed
// in the active-not-pending state, or if another exception was taken before the Software Step exception.
// Returns FALSE otherwise, indicating that the previously executed instruction was executed in the
// active-not-pending state, that is, the instruction was stepped.
boolean// ExceptionSyndrome()
// ===================
// Return a blank exception syndrome record for an exception of the given type.
ExceptionRecord SoftwareStep_DidNotStep();ExceptionSyndrome(Exception exceptype)
ExceptionRecord r;
r.exceptype = exceptype;
// Initialize all other fields
r.syndrome = Zeros();
r.syndrome2 = Zeros();
r.vaddress = Zeros();
r.ipavalid = FALSE;
r.NS = '0';
r.ipaddress = Zeros();
return r;
// Returns a value that describes the previously executed instruction. The result is valid only if
// SoftwareStep_DidNotStep() returns FALSE.
// Might return TRUE or FALSE if the instruction was an AArch32 LDREX or LDAEX that failed its condition code test.
// Otherwise returns TRUE if the instruction was a Load-Exclusive class instruction, and FALSE if the
// instruction was not a Load-Exclusive class instruction.
boolean// ReservedValue()
// =============== SoftwareStep_SteppedEX();ReservedValue()
ifUsingAArch32() && !AArch32.GeneralExceptionsToAArch64() then
AArch32.TakeUndefInstrException();
else
AArch64.UndefinedFault();
// ConditionSyndrome()
// ===================
// Return CV and COND fields of instruction syndrome
bits(5)// UnallocatedEncoding()
// ===================== ConditionSyndrome()
bits(5) syndrome;
UnallocatedEncoding()
if UsingAArch32() then
cond =() && AArch32.CurrentCondAArch32.ExecutingCP10or11Instr();
if PSTATE.T == '0' then // A32
syndrome<4> = '1';
// A conditional A32 instruction that is known to pass its condition code check
// can be presented either with COND set to 0xE, the value for unconditional, or
// the COND value held in the instruction.
if() then
FPEXC.DEX = '0';
if ConditionHoldsUsingAArch32(cond) &&() && ! ConstrainUnpredictableBoolAArch32.GeneralExceptionsToAArch64(() then();
else
AArch64.UndefinedFaultUnpredictable_ESRCONDPASSAArch32.TakeUndefInstrException) then
syndrome<3:0> = '1110';
else
syndrome<3:0> = cond;
else // T32
// When a T32 instruction is trapped, it is IMPLEMENTATION DEFINED whether:
// * CV set to 0 and COND is set to an UNKNOWN value
// * CV set to 1 and COND is set to the condition code for the condition that
// applied to the instruction.
if boolean IMPLEMENTATION_DEFINED "Condition valid for trapped T32" then
syndrome<4> = '1';
syndrome<3:0> = cond;
else
syndrome<4> = '0';
syndrome<3:0> = bits(4) UNKNOWN;
else
syndrome<4> = '1';
syndrome<3:0> = '1110';
return syndrome;();
enumeration// EncodeLDFSC()
// =============
// Function that gives the Long-descriptor FSC code for types of Fault
bits(6) Exception {EncodeLDFSC(Exception_Uncategorized, // Uncategorized or unknown reasonstatuscode, integer level)
bits(6) result;
case statuscode of
when
Exception_WFxTrap, // Trapped WFI or WFE instructionresult = '0000':level<1:0>; assert level IN {0,1,2,3};
when
Exception_CP15RTTrap, // Trapped AArch32 MCR or MRC access to CP15result = '0010':level<1:0>; assert level IN {1,2,3};
when
Exception_CP15RRTTrap, // Trapped AArch32 MCRR or MRRC access to CP15result = '0011':level<1:0>; assert level IN {1,2,3};
when
Exception_CP14RTTrap, // Trapped AArch32 MCR or MRC access to CP14result = '0001':level<1:0>; assert level IN {0,1,2,3};
when
Exception_CP14DTTrap, // Trapped AArch32 LDC or STC access to CP14result = '010000';
when
Exception_AdvSIMDFPAccessTrap, // HCPTR-trapped access to SIMD or FPresult = '0101':level<1:0>; assert level IN {0,1,2,3};
when
Exception_FPIDTrap, // Trapped access to SIMD or FP ID registerresult = '011000';
when
Exception_LDST64BTrap, // Trapped access to ST64BV, ST64BV0, ST64B and LD64B
// Trapped BXJ instruction not supported in Armv8result = '0111':level<1:0>; assert level IN {0,1,2,3};
when
Exception_PACTrap, // Trapped invalid PAC useresult = '011001';
when
Exception_CP14RRTTrap, // Trapped MRRC access to CP14 from AArch32result = '010001';
when
Exception_IllegalState, // Illegal Execution stateresult = '100001';
when
Exception_SupervisorCall, // Supervisor Callresult = '100010';
when
Exception_HypervisorCall, // Hypervisor Callresult = '110000';
when
Exception_MonitorCall, // Monitor Call or Trapped SMC instructionresult = '110001';
when
Exception_SystemRegisterTrap, // Trapped MRS or MSR system register accessresult = '110100'; // IMPLEMENTATION DEFINED
when
Exception_ERetTrap, // Trapped invalid ERET useresult = '110101'; // IMPLEMENTATION DEFINED
otherwise
Exception_InstructionAbort, // Instruction Abort or Prefetch Abort
Exception_PCAlignment, // PC alignment fault
Exception_DataAbort, // Data Abort
Exception_NV2DataAbort, // Data abort at EL1 reported as being from EL2
Exception_PACFail, // PAC Authentication failure
Exception_SPAlignment, // SP alignment fault
Exception_FPTrappedException, // IEEE trapped FP exception
Exception_SError, // SError interrupt
Exception_Breakpoint, // (Hardware) Breakpoint
Exception_SoftwareStep, // Software Step
Exception_Watchpoint, // Watchpoint
Exception_NV2Watchpoint, // Watchpoint at EL1 reported as being from EL2
Exception_SoftwareBreakpoint, // Software Breakpoint Instruction
Exception_VectorCatch, // AArch32 Vector Catch
Exception_IRQ, // IRQ interrupt
Exception_SVEAccessTrap, // HCPTR trapped access to SVE
Exception_BranchTarget, // Branch Target Identification
Exception_FIQ}; // FIQ interrupt();
return result;
type// IPAValid()
// ==========
// Return TRUE if the IPA is reported for the abort
boolean ExceptionRecord is (IPAValid( fault)
assert fault.statuscode != Fault_None;
if fault.s2fs1walk then
return fault.statuscode IN {Fault_AccessFlag, Fault_Permission, Fault_Translation,
Fault_AddressSize};
elsif fault.secondstage then
return fault.statuscode IN {Fault_AccessFlag, Fault_Translation, Fault_AddressSizeExceptionFaultRecord exceptype, // Exception class
bits(25) syndrome, // Syndrome record
bits(5) syndrome2, // ST64BV(0) return value register specifier
bits(64) vaddress, // Virtual fault address
boolean ipavalid, // Physical fault address for second stage faults is valid
bits(1) NS, // Physical fault address for second stage faults is Non-secure or secure
bits(52) ipaddress) // Physical fault address for second stage faults};
else
return FALSE;
// ExceptionSyndrome()
// ===================
// Return a blank exception syndrome record for an exception of the given type.
// IsAsyncAbort()
// ==============
// Returns TRUE if the abort currently being processed is an asynchronous abort, and FALSE
// otherwise.
ExceptionRecordboolean ExceptionSyndrome(IsAsyncAbort(ExceptionFault exceptype)statuscode)
assert statuscode !=
ExceptionRecordFault_None r;
;
r.exceptype = exceptype;
// Initialize all other fields
r.syndrome = return (statuscode IN { ZerosFault_AsyncExternal();
r.syndrome2 =, ZerosFault_AsyncParity();
r.vaddress =});
// IsAsyncAbort()
// ==============
boolean ZerosIsAsyncAbort();
r.ipavalid = FALSE;
r.NS = '0';
r.ipaddress =( fault)
return IsAsyncAbortZerosFaultRecord();
return r;(fault.statuscode);
// EncodeLDFSC()
// =============
// Function that gives the Long-descriptor FSC code for types of Fault
// IsDebugException()
// ==================
bits(6)boolean EncodeLDFSC(IsDebugException(FaultFaultRecord statuscode, integer level)
bits(6) result;
case statuscode of
whenfault)
assert fault.statuscode != Fault_AddressSizeFault_None result = '0000':level<1:0>; assert level IN {0,1,2,3};
when;
return fault.statuscode == Fault_AccessFlag result = '0010':level<1:0>; assert level IN {1,2,3};
when Fault_Permission result = '0011':level<1:0>; assert level IN {1,2,3};
when Fault_Translation result = '0001':level<1:0>; assert level IN {0,1,2,3};
when Fault_SyncExternal result = '010000';
when Fault_SyncExternalOnWalk result = '0101':level<1:0>; assert level IN {0,1,2,3};
when Fault_SyncParity result = '011000';
when Fault_SyncParityOnWalk result = '0111':level<1:0>; assert level IN {0,1,2,3};
when Fault_AsyncParity result = '011001';
when Fault_AsyncExternal result = '010001';
when Fault_Alignment result = '100001';
when Fault_Debug result = '100010';
when Fault_TLBConflict result = '110000';
when Fault_HWUpdateAccessFlag result = '110001';
when Fault_Lockdown result = '110100'; // IMPLEMENTATION DEFINED
when Fault_Exclusive result = '110101'; // IMPLEMENTATION DEFINED
otherwise Unreachable();
return result;;
// IPAValid()
// ==========
// Return TRUE if the IPA is reported for the abort
// IsExternalAbort()
// =================
// Returns TRUE if the abort currently being processed is an external abort and FALSE otherwise.
boolean IPAValid(IsExternalAbort(FaultRecordFault fault)
assert fault.statuscode !=statuscode)
assert statuscode != Fault_None;
if fault.s2fs1walk then
return fault.statuscode IN { return (statuscode IN {Fault_AccessFlagFault_SyncExternal, Fault_PermissionFault_SyncParity, Fault_TranslationFault_SyncExternalOnWalk,
Fault_AddressSizeFault_SyncParityOnWalk};
elsif fault.secondstage then
return fault.statuscode IN {,Fault_AccessFlagFault_AsyncExternal, Fault_TranslationFault_AsyncParity,});
// IsExternalAbort()
// =================
boolean (FaultRecord fault)
return IsExternalAbortFault_AddressSizeIsExternalAbort};
else
return FALSE;(fault.statuscode);
// IsAsyncAbort()
// ==============
// Returns TRUE if the abort currently being processed is an asynchronous abort, and FALSE
// otherwise.
// IsExternalSyncAbort()
// =====================
// Returns TRUE if the abort currently being processed is an external synchronous abort and FALSE otherwise.
boolean IsAsyncAbort(IsExternalSyncAbort(Fault statuscode)
assert statuscode != Fault_None;
return (statuscode IN {Fault_AsyncExternalFault_SyncExternal, Fault_AsyncParityFault_SyncParity});
// IsAsyncAbort()
// ==============
boolean, IsAsyncAbortFault_SyncExternalOnWalk(,Fault_SyncParityOnWalk});
// IsExternalSyncAbort()
// =====================
boolean IsExternalSyncAbort(FaultRecord fault)
return IsAsyncAbortIsExternalSyncAbort(fault.statuscode);
// IsDebugException()
// ==================
// IsFault()
// =========
// Return TRUE if a fault is associated with an address descriptor
boolean IsDebugException(IsFault(FaultRecordAddressDescriptor fault)
assert fault.statuscode !=addrdesc)
return addrdesc.fault.statuscode != Fault_None;
return fault.statuscode == Fault_Debug;
// IsExternalAbort()
// =================
// Returns TRUE if the abort currently being processed is an external abort and FALSE otherwise.
// IsSErrorInterrupt()
// ===================
// Returns TRUE if the abort currently being processed is an SError interrupt, and FALSE
// otherwise.
boolean IsExternalAbort(IsSErrorInterrupt(Fault statuscode)
assert statuscode != Fault_None;
return (statuscode IN {Fault_SyncExternal, Fault_SyncParity, Fault_SyncExternalOnWalk, Fault_SyncParityOnWalk,
Fault_AsyncExternal, Fault_AsyncParity });
// IsExternalAbort()
// =================
// IsSErrorInterrupt()
// ===================
boolean IsExternalAbortIsSErrorInterrupt(FaultRecord fault)
return IsExternalAbortIsSErrorInterrupt(fault.statuscode);
// IsExternalSyncAbort()
// =====================
// Returns TRUE if the abort currently being processed is an external synchronous abort and FALSE otherwise.
// IsSecondStage()
// ===============
boolean IsExternalSyncAbort(IsSecondStage(FaultFaultRecord statuscode)
assert statuscode !=fault)
assert fault.statuscode != Fault_None;
return (statuscode IN {Fault_SyncExternal, Fault_SyncParity, Fault_SyncExternalOnWalk, Fault_SyncParityOnWalk});
// IsExternalSyncAbort()
// =====================
boolean IsExternalSyncAbort(FaultRecord fault)
return IsExternalSyncAbort(fault.statuscode);;
return fault.secondstage;
// IsFault()
// =========
// Return TRUE if a fault is associated with an address descriptor
boolean// Returns the extended syndrome information for a second stage fault.
// <10> - Syndrome valid bit. The syndrome is only valid for certain types of access instruction.
// <9:8> - Access size.
// <7> - Sign extended (for loads).
// <6:2> - Transfer register.
// <1> - Transfer register is 64-bit.
// <0> - Instruction has acquire/release semantics.
bits(11) IsFault(LSInstructionSyndrome();AddressDescriptor addrdesc)
return addrdesc.fault.statuscode != Fault_None;
// IsSErrorInterrupt()
// ===================
// Returns TRUE if the abort currently being processed is an SError interrupt, and FALSE
// otherwise.
// ASR()
// =====
booleanbits(N) IsSErrorInterrupt(ASR(bits(N) x, integer shift)
assert shift >= 0;
if shift == 0 then
result = x;
else
(result, -) =FaultASR_C statuscode)
assert statuscode != Fault_None;
return (statuscode IN {Fault_AsyncExternal, Fault_AsyncParity});
// IsSErrorInterrupt()
// ===================
boolean IsSErrorInterrupt(FaultRecord fault)
return IsSErrorInterrupt(fault.statuscode);(x, shift);
return result;
// IsSecondStage()
// ===============
// ASR_C()
// =======
boolean(bits(N), bit) IsSecondStage(ASR_C(bits(N) x, integer shift)
assert shift > 0;
extended_x =FaultRecordSignExtend fault)
assert fault.statuscode != Fault_None;
return fault.secondstage;(x, shift+N);
result = extended_x<shift+N-1:shift>;
carry_out = extended_x<shift-1>;
return (result, carry_out);
// Returns the extended syndrome information for a second stage fault.
// <10> - Syndrome valid bit. The syndrome is only valid for certain types of access instruction.
// <9:8> - Access size.
// <7> - Sign extended (for loads).
// <6:2> - Transfer register.
// <1> - Transfer register is 64-bit.
// <0> - Instruction has acquire/release semantics.
bits(11)// Abs()
// =====
integer LSInstructionSyndrome();Abs(integer x)
return if x >= 0 then x else -x;
// Abs()
// =====
realAbs(real x)
return if x >= 0.0 then x else -x;
// ASR()
// =====
// Align()
// =======
bits(N)integer ASR(bits(N) x, integer shift)
assert shift >= 0;
if shift == 0 then
result = x;
else
(result, -) =Align(integer x, integer y)
return y * (x DIV y);
// Align()
// =======
bits(N) (bits(N) x, integer y)
return Align(UIntASR_CAlign(x, shift);
return result;(x), y)<N-1:0>;
// ASR_C()
// =======
// BitCount()
// ==========
(bits(N), bit)integer ASR_C(bits(N) x, integer shift)
assert shift > 0;
extended_x =BitCount(bits(N) x)
integer result = 0;
for i = 0 to N-1
if x<i> == '1' then
result = result + 1;
return result; SignExtend(x, shift+N);
result = extended_x<shift+N-1:shift>;
carry_out = extended_x<shift-1>;
return (result, carry_out);
// Abs()
// =====
// CountLeadingSignBits()
// ======================
integer Abs(integer x)
return if x >= 0 then x else -x;
// Abs()
// =====
realCountLeadingSignBits(bits(N) x)
return AbsCountLeadingZeroBits(real x)
return if x >= 0.0 then x else -x;(x<N-1:1> EOR x<N-2:0>);
// Align()
// =======
// CountLeadingZeroBits()
// ======================
integer Align(integer x, integer y)
return y * (x DIV y);
// Align()
// =======
bits(N)CountLeadingZeroBits(bits(N) x)
return N - ( AlignHighestSetBit(bits(N) x, integer y)
return Align(UInt(x), y)<N-1:0>;(x) + 1);
// BitCount()
// ==========
// Elem[] - non-assignment form
// ============================
integerbits(size) BitCount(bits(N) x)
integer result = 0;
for i = 0 to N-1
if x<i> == '1' then
result = result + 1;
return result;Elem[bits(N) vector, integer e, integer size]
assert e >= 0 && (e+1)*size <= N;
return vector<e*size+size-1 : e*size>;
// Elem[] - non-assignment form
// ============================
bits(size)Elem[bits(N) vector, integer e]
return Elem[vector, e, size];
// Elem[] - assignment form
// ========================
Elem[bits(N) &vector, integer e, integer size] = bits(size) value
assert e >= 0 && (e+1)*size <= N;
vector<(e+1)*size-1:e*size> = value;
return;
// Elem[] - assignment form
// ========================
Elem[bits(N) &vector, integer e] = bits(size) value
Elem[vector, e, size] = value;
return;
// CountLeadingSignBits()
// ======================
// Extend()
// ========
integerbits(N) CountLeadingSignBits(bits(N) x)
returnExtend(bits(M) x, integer N, boolean unsigned)
return if unsigned then (x, N) else SignExtend(x, N);
// Extend()
// ========
bits(N) Extend(bits(M) x, boolean unsigned)
return ExtendCountLeadingZeroBitsZeroExtend(x<N-1:1> EOR x<N-2:0>);(x, N, unsigned);
// CountLeadingZeroBits()
// ======================
// HighestSetBit()
// ===============
integer CountLeadingZeroBits(bits(N) x)
return N - (HighestSetBit(bits(N) x)
for i = N-1 downto 0
if x<i> == '1' then return i;
return -1;HighestSetBit(x) + 1);
// Elem[] - non-assignment form
// ============================
// Int()
// =====
bits(size)integer Elem[bits(N) vector, integer e, integer size]
assert e >= 0 && (e+1)*size <= N;
return vector<e*size+size-1 : e*size>;
// Elem[] - non-assignment form
// ============================
bits(size)Int(bits(N) x, boolean unsigned)
result = if unsigned then Elem[bits(N) vector, integer e]
return(x) else ElemSInt[vector, e, size];
// Elem[] - assignment form
// ========================
Elem[bits(N) &vector, integer e, integer size] = bits(size) value
assert e >= 0 && (e+1)*size <= N;
vector<(e+1)*size-1:e*size> = value;
return;
// Elem[] - assignment form
// ========================
Elem[bits(N) &vector, integer e] = bits(size) value
Elem[vector, e, size] = value;
return;(x);
return result;
// Extend()
// IsOnes()
// ========
bits(N)boolean Extend(bits(M) x, integer N, boolean unsigned)
return if unsigned thenIsOnes(bits(N) x)
return x == ZeroExtendOnes(x, N) else SignExtend(x, N);
// Extend()
// ========
bits(N) Extend(bits(M) x, boolean unsigned)
return Extend(x, N, unsigned);(N);
// HighestSetBit()
// ===============
// IsZero()
// ========
integerboolean HighestSetBit(bits(N) x)
for i = N-1 downto 0
if x<i> == '1' then return i;
return -1;IsZero(bits(N) x)
return x ==Zeros(N);
// Int()
// =====
// IsZeroBit()
// ===========
integerbit Int(bits(N) x, boolean unsigned)
result = if unsigned thenIsZeroBit(bits(N) x)
return if UIntIsZero(x) else SInt(x);
return result;(x) then '1' else '0';
// IsOnes()
// ========
// LSL()
// =====
booleanbits(N) IsOnes(bits(N) x)
return x ==LSL(bits(N) x, integer shift)
assert shift >= 0;
if shift == 0 then
result = x;
else
(result, -) = OnesLSL_C(N);(x, shift);
return result;
// IsZero()
// ========
// LSL_C()
// =======
boolean(bits(N), bit) IsZero(bits(N) x)
return x ==LSL_C(bits(N) x, integer shift)
assert shift > 0;
extended_x = x : Zeros(N);(shift);
result = extended_x<N-1:0>;
carry_out = extended_x<N>;
return (result, carry_out);
// IsZeroBit()
// ===========
// LSR()
// =====
bitbits(N) IsZeroBit(bits(N) x)
return ifLSR(bits(N) x, integer shift)
assert shift >= 0;
if shift == 0 then
result = x;
else
(result, -) = IsZeroLSR_C(x) then '1' else '0';(x, shift);
return result;
// LSL()
// =====
// LSR_C()
// =======
bits(N)(bits(N), bit) LSL(bits(N) x, integer shift)
assert shift >= 0;
if shift == 0 then
result = x;
else
(result, -) =LSR_C(bits(N) x, integer shift)
assert shift > 0;
extended_x = LSL_CZeroExtend(x, shift);
return result;(x, shift+N);
result = extended_x<shift+N-1:shift>;
carry_out = extended_x<shift-1>;
return (result, carry_out);
// LSL_C()
// =======
// LowestSetBit()
// ==============
(bits(N), bit)integer LSL_C(bits(N) x, integer shift)
assert shift > 0;
extended_x = x :LowestSetBit(bits(N) x)
for i = 0 to N-1
if x<i> == '1' then return i;
return N; Zeros(shift);
result = extended_x<N-1:0>;
carry_out = extended_x<N>;
return (result, carry_out);
// LSR()
// Max()
// =====
bits(N)integer LSR(bits(N) x, integer shift)
assert shift >= 0;
if shift == 0 then
result = x;
else
(result, -) =Max(integer a, integer b)
return if a >= b then a else b;
// Max()
// =====
real LSR_CMax(x, shift);
return result;(real a, real b)
return if a >= b then a else b;
// LSR_C()
// =======
// Min()
// =====
(bits(N), bit)integer LSR_C(bits(N) x, integer shift)
assert shift > 0;
extended_x =Min(integer a, integer b)
return if a <= b then a else b;
// Min()
// =====
real ZeroExtendMin(x, shift+N);
result = extended_x<shift+N-1:shift>;
carry_out = extended_x<shift-1>;
return (result, carry_out);(real a, real b)
return if a <= b then a else b;
// LowestSetBit()
// ==============
// Ones()
// ======
integerbits(N) LowestSetBit(bits(N) x)
for i = 0 to N-1
if x<i> == '1' then return i;
return N;Ones(integer N)
returnReplicate('1',N);
// Ones()
// ======
bits(N) Ones()
return Ones(N);
// Max()
// ROR()
// =====
integerbits(N) Max(integer a, integer b)
return if a >= b then a else b;
// Max()
// =====
realROR(bits(N) x, integer shift)
assert shift >= 0;
if shift == 0 then
result = x;
else
(result, -) = MaxROR_C(real a, real b)
return if a >= b then a else b;(x, shift);
return result;
// Min()
// =====
// ROR_C()
// =======
integer(bits(N), bit) Min(integer a, integer b)
return if a <= b then a else b;
// Min()
// =====
realROR_C(bits(N) x, integer shift)
assert shift != 0;
m = shift MOD N;
result = (x,m) OR LSLMinLSR(real a, real b)
return if a <= b then a else b;(x,N-m);
carry_out = result<N-1>;
return (result, carry_out);
// Ones()
// ======
// Replicate()
// ===========
bits(N) Ones(integer N)
Replicate(bits(M) x)
assert N MOD M == 0;
return Replicate('1',N);
(x, N DIV M);
// Ones()
// ======
bits(N)bits(M*N) Ones()
returnReplicate(bits(M) x, integer N); Ones(N);
// ROR()
// =====
bits(N)integer ROR(bits(N) x, integer shift)
assert shift >= 0;
if shift == 0 then
result = x;
else
(result, -) =RoundDown(real x); ROR_C(x, shift);
return result;
// ROR_C()
// =======
// RoundTowardsZero()
// ==================
(bits(N), bit)integer ROR_C(bits(N) x, integer shift)
assert shift != 0;
m = shift MOD N;
result =RoundTowardsZero(real x)
return if x == 0.0 then 0 else if x >= 0.0 then LSRRoundDown(x,m) OR(x) else LSLRoundUp(x,N-m);
carry_out = result<N-1>;
return (result, carry_out);(x);
// RoundUp()
// =========
// Prototype only; defined elsewhere. Presumably rounds x up (ceiling) --
// used by RoundTowardsZero() for x < 0.
integer RoundUp(real x);
// SInt()
// ======
// Two's-complement signed integer value of x.
integer SInt(bits(N) x)
    result = 0;
    for i = 0 to N-1
        if x<i> == '1' then result = result + 2^i;
    if x<N-1> == '1' then result = result - 2^N;
    return result;
// SignExtend()
// ============
// Sign-extend x to N bits by replicating its top bit.
bits(N) SignExtend(bits(M) x, integer N)
    assert N >= M;
    return Replicate(x<M-1>, N-M) : x;
// SignExtend()
// ============
bits(N) SignExtend(bits(M) x)
    return SignExtend(x, N);
// UInt()
// ======
// Unsigned integer value of x.
integer UInt(bits(N) x)
    result = 0;
    for i = 0 to N-1
        if x<i> == '1' then result = result + 2^i;
    return result;
// ZeroExtend()
// ============
// Zero-extend x to N bits by prepending zeros.
bits(N) ZeroExtend(bits(M) x, integer N)
    assert N >= M;
    return Zeros(N-M) : x;
// ZeroExtend()
// ============
bits(N) ZeroExtend(bits(M) x)
    return ZeroExtend(x, N);
// Zeros()
// =======
bits(N) Zeros(integer N)
    return Replicate('0',N);
// Zeros()
// =======
bits(N) Zeros()
    return Zeros(N);
// BitReverse()
// ============
// Reverse the order of the bits in data.
bits(N) BitReverse(bits(N) data)
    bits(N) result;
    for i = 0 to N-1
        result<N-i-1> = data<i>;
    return result;
// HaveCRCExt()
// ============
// TRUE if the CRC32 instructions are implemented (mandatory from ARMv8.1,
// otherwise IMPLEMENTATION DEFINED).
boolean HaveCRCExt()
    return HasArchVersion(ARMv8p1) || boolean IMPLEMENTATION_DEFINED "Have CRC extension";
// Poly32Mod2()
// ============
// Poly32Mod2 on a bitstring does a polynomial Modulus over {0,1} operation
bits(32) Poly32Mod2(bits(N) data, bits(32) poly)
    assert N > 32;
    for i = N-1 downto 32
        if data<i> == '1' then
            data<i-1:0> = data<i-1:0> EOR (poly:Zeros(i-32));
    return data<31:0>;
// AESInvMixColumns()
// ==================
// Transformation in the Inverse Cipher that is the inverse of AESMixColumns.
bits(128) AESInvMixColumns(bits (128) op)
    bits(4*8) in0 = op< 96+:8> : op< 64+:8> : op< 32+:8> : op<  0+:8>;
    bits(4*8) in1 = op<104+:8> : op< 72+:8> : op< 40+:8> : op<  8+:8>;
    bits(4*8) in2 = op<112+:8> : op< 80+:8> : op< 48+:8> : op< 16+:8>;
    bits(4*8) in3 = op<120+:8> : op< 88+:8> : op< 56+:8> : op< 24+:8>;
    bits(4*8) out0;
    bits(4*8) out1;
    bits(4*8) out2;
    bits(4*8) out3;
    for c = 0 to 3
        out0<c*8+:8> = FFmul0E(in0<c*8+:8>) EOR FFmul0B(in1<c*8+:8>) EOR FFmul0D(in2<c*8+:8>) EOR FFmul09(in3<c*8+:8>);
        out1<c*8+:8> = FFmul09(in0<c*8+:8>) EOR FFmul0E(in1<c*8+:8>) EOR FFmul0B(in2<c*8+:8>) EOR FFmul0D(in3<c*8+:8>);
        out2<c*8+:8> = FFmul0D(in0<c*8+:8>) EOR FFmul09(in1<c*8+:8>) EOR FFmul0E(in2<c*8+:8>) EOR FFmul0B(in3<c*8+:8>);
        out3<c*8+:8> = FFmul0B(in0<c*8+:8>) EOR FFmul0D(in1<c*8+:8>) EOR FFmul09(in2<c*8+:8>) EOR FFmul0E(in3<c*8+:8>);
    return (
        out3<3*8+:8> : out2<3*8+:8> : out1<3*8+:8> : out0<3*8+:8> :
        out3<2*8+:8> : out2<2*8+:8> : out1<2*8+:8> : out0<2*8+:8> :
        out3<1*8+:8> : out2<1*8+:8> : out1<1*8+:8> : out0<1*8+:8> :
        out3<0*8+:8> : out2<0*8+:8> : out1<0*8+:8> : out0<0*8+:8>
    );
// AESInvShiftRows()
// =================
// Transformation in the Inverse Cipher that is inverse of AESShiftRows.
bits(128) AESInvShiftRows(bits(128) op)
    return (
        op< 24+:8> : op< 48+:8> : op< 72+:8> : op< 96+:8> :
        op<120+:8> : op< 16+:8> : op< 40+:8> : op< 64+:8> :
        op< 88+:8> : op<112+:8> : op<  8+:8> : op< 32+:8> :
        op< 56+:8> : op< 80+:8> : op<104+:8> : op<  0+:8>
    );
// AESInvSubBytes()
// ================
// Transformation in the Inverse Cipher that is the inverse of AESSubBytes.
bits(128) AESInvSubBytes(bits(128) op)
    // Inverse S-box values
    bits(16*16*8) GF2_inv = (
        /*          F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0x7d0c2155631469e126d677ba7e042b17<127:0> :
        /*E*/ 0x619953833cbbebc8b0f52aae4d3be0a0<127:0> :
        /*D*/ 0xef9cc9939f7ae52d0d4ab519a97f5160<127:0> :
        /*C*/ 0x5fec8027591012b131c7078833a8dd1f<127:0> :
        /*B*/ 0xf45acd78fec0db9a2079d2c64b3e56fc<127:0> :
        /*A*/ 0x1bbe18aa0e62b76f89c5291d711af147<127:0> :
        /*9*/ 0x6edf751ce837f9e28535ade72274ac96<127:0> :
        /*8*/ 0x73e6b4f0cecff297eadc674f4111913a<127:0> :
        /*7*/ 0x6b8a130103bdafc1020f3fca8f1e2cd0<127:0> :
        /*6*/ 0x0645b3b80558e4f70ad3bc8c00abd890<127:0> :
        /*5*/ 0x849d8da75746155edab9edfd5048706c<127:0> :
        /*4*/ 0x92b6655dcc5ca4d41698688664f6f872<127:0> :
        /*3*/ 0x25d18b6d49a25b76b224d92866a12e08<127:0> :
        /*2*/ 0x4ec3fa420b954cee3d23c2a632947b54<127:0> :
        /*1*/ 0xcbe9dec444438e3487ff2f9b8239e37c<127:0> :
        /*0*/ 0xfbd7f3819ea340bf38a53630d56a0952<127:0>
    );
    bits(128) out;
    for i = 0 to 15
        out<i*8+:8> = GF2_inv<UInt(op<i*8+:8>)*8+:8>;
    return out;
// AESMixColumns()
// ===============
// Transformation in the Cipher that takes all of the columns of the
// State and mixes their data (independently of one another) to
// produce new columns.
bits(128) AESMixColumns(bits (128) op)
    bits(4*8) in0 = op< 96+:8> : op< 64+:8> : op< 32+:8> : op<  0+:8>;
    bits(4*8) in1 = op<104+:8> : op< 72+:8> : op< 40+:8> : op<  8+:8>;
    bits(4*8) in2 = op<112+:8> : op< 80+:8> : op< 48+:8> : op< 16+:8>;
    bits(4*8) in3 = op<120+:8> : op< 88+:8> : op< 56+:8> : op< 24+:8>;
    bits(4*8) out0;
    bits(4*8) out1;
    bits(4*8) out2;
    bits(4*8) out3;
    for c = 0 to 3
        out0<c*8+:8> = FFmul02(in0<c*8+:8>) EOR FFmul03(in1<c*8+:8>) EOR in2<c*8+:8> EOR in3<c*8+:8>;
        out1<c*8+:8> = in0<c*8+:8> EOR FFmul02(in1<c*8+:8>) EOR FFmul03(in2<c*8+:8>) EOR in3<c*8+:8>;
        out2<c*8+:8> = in0<c*8+:8> EOR in1<c*8+:8> EOR FFmul02(in2<c*8+:8>) EOR FFmul03(in3<c*8+:8>);
        out3<c*8+:8> = FFmul03(in0<c*8+:8>) EOR in1<c*8+:8> EOR in2<c*8+:8> EOR FFmul02(in3<c*8+:8>);
    return (
        out3<3*8+:8> : out2<3*8+:8> : out1<3*8+:8> : out0<3*8+:8> :
        out3<2*8+:8> : out2<2*8+:8> : out1<2*8+:8> : out0<2*8+:8> :
        out3<1*8+:8> : out2<1*8+:8> : out1<1*8+:8> : out0<1*8+:8> :
        out3<0*8+:8> : out2<0*8+:8> : out1<0*8+:8> : out0<0*8+:8>
    );
// AESShiftRows()
// ==============
// Transformation in the Cipher that processes the State by cyclically
// shifting the last three rows of the State by different offsets.
bits(128) AESShiftRows(bits(128) op)
    return (
        op< 88+:8> : op< 48+:8> : op<  8+:8> : op< 96+:8> :
        op< 56+:8> : op< 16+:8> : op<104+:8> : op< 64+:8> :
        op< 24+:8> : op<112+:8> : op< 72+:8> : op< 32+:8> :
        op<120+:8> : op< 80+:8> : op< 40+:8> : op<  0+:8>
    );
// AESSubBytes()
// =============
// Transformation in the Cipher that processes the State using a nonlinear
// byte substitution table (S-box) that operates on each of the State bytes
// independently.
bits(128) AESSubBytes(bits(128) op)
    // S-box values
    bits(16*16*8) GF2 = (
        /*          F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0x16bb54b00f2d99416842e6bf0d89a18c<127:0> :
        /*E*/ 0xdf2855cee9871e9b948ed9691198f8e1<127:0> :
        /*D*/ 0x9e1dc186b95735610ef6034866b53e70<127:0> :
        /*C*/ 0x8a8bbd4b1f74dde8c6b4a61c2e2578ba<127:0> :
        /*B*/ 0x08ae7a65eaf4566ca94ed58d6d37c8e7<127:0> :
        /*A*/ 0x79e4959162acd3c25c2406490a3a32e0<127:0> :
        /*9*/ 0xdb0b5ede14b8ee4688902a22dc4f8160<127:0> :
        /*8*/ 0x73195d643d7ea7c41744975fec130ccd<127:0> :
        /*7*/ 0xd2f3ff1021dab6bcf5389d928f40a351<127:0> :
        /*6*/ 0xa89f3c507f02f94585334d43fbaaefd0<127:0> :
        /*5*/ 0xcf584c4a39becb6a5bb1fc20ed00d153<127:0> :
        /*4*/ 0x842fe329b3d63b52a05a6e1b1a2c8309<127:0> :
        /*3*/ 0x75b227ebe28012079a059618c323c704<127:0> :
        /*2*/ 0x1531d871f1e5a534ccf73f362693fdb7<127:0> :
        /*1*/ 0xc072a49cafa2d4adf04759fa7dc982ca<127:0> :
        /*0*/ 0x76abd7fe2b670130c56f6bf27b777c63<127:0>
    );
    bits(128) out;
    for i = 0 to 15
        out<i*8+:8> = GF2<UInt(op<i*8+:8>)*8+:8>;
    return out;
// FFmul02()
// =========
// Multiply by 0x02 in GF(2^8), as a byte-indexed lookup table.
bits(8) FFmul02(bits(8) b)
    bits(256*8) FFmul_02 = (
        /*          F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0xE5E7E1E3EDEFE9EBF5F7F1F3FDFFF9FB<127:0> :
        /*E*/ 0xC5C7C1C3CDCFC9CBD5D7D1D3DDDFD9DB<127:0> :
        /*D*/ 0xA5A7A1A3ADAFA9ABB5B7B1B3BDBFB9BB<127:0> :
        /*C*/ 0x858781838D8F898B959791939D9F999B<127:0> :
        /*B*/ 0x656761636D6F696B757771737D7F797B<127:0> :
        /*A*/ 0x454741434D4F494B555751535D5F595B<127:0> :
        /*9*/ 0x252721232D2F292B353731333D3F393B<127:0> :
        /*8*/ 0x050701030D0F090B151711131D1F191B<127:0> :
        /*7*/ 0xFEFCFAF8F6F4F2F0EEECEAE8E6E4E2E0<127:0> :
        /*6*/ 0xDEDCDAD8D6D4D2D0CECCCAC8C6C4C2C0<127:0> :
        /*5*/ 0xBEBCBAB8B6B4B2B0AEACAAA8A6A4A2A0<127:0> :
        /*4*/ 0x9E9C9A98969492908E8C8A8886848280<127:0> :
        /*3*/ 0x7E7C7A78767472706E6C6A6866646260<127:0> :
        /*2*/ 0x5E5C5A58565452504E4C4A4846444240<127:0> :
        /*1*/ 0x3E3C3A38363432302E2C2A2826242220<127:0> :
        /*0*/ 0x1E1C1A18161412100E0C0A0806040200<127:0>
    );
    return FFmul_02<UInt(b)*8+:8>;
// FFmul03()
// =========
// Multiply by 0x03 in GF(2^8), as a byte-indexed lookup table.
bits(8) FFmul03(bits(8) b)
    bits(256*8) FFmul_03 = (
        /*          F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0x1A191C1F16151013020104070E0D080B<127:0> :
        /*E*/ 0x2A292C2F26252023323134373E3D383B<127:0> :
        /*D*/ 0x7A797C7F76757073626164676E6D686B<127:0> :
        /*C*/ 0x4A494C4F46454043525154575E5D585B<127:0> :
        /*B*/ 0xDAD9DCDFD6D5D0D3C2C1C4C7CECDC8CB<127:0> :
        /*A*/ 0xEAE9ECEFE6E5E0E3F2F1F4F7FEFDF8FB<127:0> :
        /*9*/ 0xBAB9BCBFB6B5B0B3A2A1A4A7AEADA8AB<127:0> :
        /*8*/ 0x8A898C8F86858083929194979E9D989B<127:0> :
        /*7*/ 0x818287848D8E8B88999A9F9C95969390<127:0> :
        /*6*/ 0xB1B2B7B4BDBEBBB8A9AAAFACA5A6A3A0<127:0> :
        /*5*/ 0xE1E2E7E4EDEEEBE8F9FAFFFCF5F6F3F0<127:0> :
        /*4*/ 0xD1D2D7D4DDDEDBD8C9CACFCCC5C6C3C0<127:0> :
        /*3*/ 0x414247444D4E4B48595A5F5C55565350<127:0> :
        /*2*/ 0x717277747D7E7B78696A6F6C65666360<127:0> :
        /*1*/ 0x212227242D2E2B28393A3F3C35363330<127:0> :
        /*0*/ 0x111217141D1E1B18090A0F0C05060300<127:0>
    );
    return FFmul_03<UInt(b)*8+:8>;
// FFmul09()
// =========
// Multiply by 0x09 in GF(2^8), as a byte-indexed lookup table.
bits(8) FFmul09(bits(8) b)
    bits(256*8) FFmul_09 = (
        /*          F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0x464F545D626B70790E071C152A233831<127:0> :
        /*E*/ 0xD6DFC4CDF2FBE0E99E978C85BAB3A8A1<127:0> :
        /*D*/ 0x7D746F6659504B42353C272E1118030A<127:0> :
        /*C*/ 0xEDE4FFF6C9C0DBD2A5ACB7BE8188939A<127:0> :
        /*B*/ 0x3039222B141D060F78716A635C554E47<127:0> :
        /*A*/ 0xA0A9B2BB848D969FE8E1FAF3CCC5DED7<127:0> :
        /*9*/ 0x0B0219102F263D34434A5158676E757C<127:0> :
        /*8*/ 0x9B928980BFB6ADA4D3DAC1C8F7FEE5EC<127:0> :
        /*7*/ 0xAAA3B8B18E879C95E2EBF0F9C6CFD4DD<127:0> :
        /*6*/ 0x3A3328211E170C05727B6069565F444D<127:0> :
        /*5*/ 0x9198838AB5BCA7AED9D0CBC2FDF4EFE6<127:0> :
        /*4*/ 0x0108131A252C373E49405B526D647F76<127:0> :
        /*3*/ 0xDCD5CEC7F8F1EAE3949D868FB0B9A2AB<127:0> :
        /*2*/ 0x4C455E5768617A73040D161F2029323B<127:0> :
        /*1*/ 0xE7EEF5FCC3CAD1D8AFA6BDB48B829990<127:0> :
        /*0*/ 0x777E656C535A41483F362D241B120900<127:0>
    );
    return FFmul_09<UInt(b)*8+:8>;
// FFmul0B()
// =========
// Multiply by 0x0B in GF(2^8), as a byte-indexed lookup table.
bits(8) FFmul0B(bits(8) b)
    bits(256*8) FFmul_0B = (
        /*          F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0xA3A8B5BE8F849992FBF0EDE6D7DCC1CA<127:0> :
        /*E*/ 0x1318050E3F3429224B405D56676C717A<127:0> :
        /*D*/ 0xD8D3CEC5F4FFE2E9808B969DACA7BAB1<127:0> :
        /*C*/ 0x68637E75444F5259303B262D1C170A01<127:0> :
        /*B*/ 0x555E434879726F640D061B10212A373C<127:0> :
        /*A*/ 0xE5EEF3F8C9C2DFD4BDB6ABA0919A878C<127:0> :
        /*9*/ 0x2E2538330209141F767D606B5A514C47<127:0> :
        /*8*/ 0x9E958883B2B9A4AFC6CDD0DBEAE1FCF7<127:0> :
        /*7*/ 0x545F424978736E650C071A11202B363D<127:0> :
        /*6*/ 0xE4EFF2F9C8C3DED5BCB7AAA1909B868D<127:0> :
        /*5*/ 0x2F2439320308151E777C616A5B504D46<127:0> :
        /*4*/ 0x9F948982B3B8A5AEC7CCD1DAEBE0FDF6<127:0> :
        /*3*/ 0xA2A9B4BF8E859893FAF1ECE7D6DDC0CB<127:0> :
        /*2*/ 0x1219040F3E3528234A415C57666D707B<127:0> :
        /*1*/ 0xD9D2CFC4F5FEE3E8818A979CADA6BBB0<127:0> :
        /*0*/ 0x69627F74454E5358313A272C1D160B00<127:0>
    );
    return FFmul_0B<UInt(b)*8+:8>;
// FFmul0D()
// =========
// Multiply by 0x0D in GF(2^8), as a byte-indexed lookup table.
bits(8) FFmul0D(bits(8) b)
    bits(256*8) FFmul_0D = (
        /*          F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0x979A8D80A3AEB9B4FFF2E5E8CBC6D1DC<127:0> :
        /*E*/ 0x474A5D50737E69642F2235381B16010C<127:0> :
        /*D*/ 0x2C21363B1815020F44495E53707D6A67<127:0> :
        /*C*/ 0xFCF1E6EBC8C5D2DF94998E83A0ADBAB7<127:0> :
        /*B*/ 0xFAF7E0EDCEC3D4D9929F8885A6ABBCB1<127:0> :
        /*A*/ 0x2A27303D1E130409424F5855767B6C61<127:0> :
        /*9*/ 0x414C5B5675786F622924333E1D10070A<127:0> :
        /*8*/ 0x919C8B86A5A8BFB2F9F4E3EECDC0D7DA<127:0> :
        /*7*/ 0x4D40575A7974636E25283F32111C0B06<127:0> :
        /*6*/ 0x9D90878AA9A4B3BEF5F8EFE2C1CCDBD6<127:0> :
        /*5*/ 0xF6FBECE1C2CFD8D59E938489AAA7B0BD<127:0> :
        /*4*/ 0x262B3C31121F08054E4354597A77606D<127:0> :
        /*3*/ 0x202D3A3714190E034845525F7C71666B<127:0> :
        /*2*/ 0xF0FDEAE7C4C9DED39895828FACA1B6BB<127:0> :
        /*1*/ 0x9B96818CAFA2B5B8F3FEE9E4C7CADDD0<127:0> :
        /*0*/ 0x4B46515C7F726568232E3934171A0D00<127:0>
    );
    return FFmul_0D<UInt(b)*8+:8>;
// FFmul0E()
// =========
// Multiply by 0x0E in GF(2^8), as a byte-indexed lookup table.
bits(8) FFmul0E(bits(8) b)
    bits(256*8) FFmul_0E = (
        /*          F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0x8D83919FB5BBA9A7FDF3E1EFC5CBD9D7<127:0> :
        /*E*/ 0x6D63717F555B49471D13010F252B3937<127:0> :
        /*D*/ 0x56584A446E60727C26283A341E10020C<127:0> :
        /*C*/ 0xB6B8AAA48E80929CC6C8DAD4FEF0E2EC<127:0> :
        /*B*/ 0x202E3C321816040A505E4C426866747A<127:0> :
        /*A*/ 0xC0CEDCD2F8F6E4EAB0BEACA28886949A<127:0> :
        /*9*/ 0xFBF5E7E9C3CDDFD18B859799B3BDAFA1<127:0> :
        /*8*/ 0x1B150709232D3F316B657779535D4F41<127:0> :
        /*7*/ 0xCCC2D0DEF4FAE8E6BCB2A0AE848A9896<127:0> :
        /*6*/ 0x2C22303E141A08065C52404E646A7876<127:0> :
        /*5*/ 0x17190B052F21333D67697B755F51434D<127:0> :
        /*4*/ 0xF7F9EBE5CFC1D3DD87899B95BFB1A3AD<127:0> :
        /*3*/ 0x616F7D735957454B111F0D032927353B<127:0> :
        /*2*/ 0x818F9D93B9B7A5ABF1FFEDE3C9C7D5DB<127:0> :
        /*1*/ 0xBAB4A6A8828C9E90CAC4D6D8F2FCEEE0<127:0> :
        /*0*/ 0x5A544648626C7E702A243638121C0E00<127:0>
    );
    return FFmul_0E<UInt(b)*8+:8>;
// HaveAESExt()
// ============
// TRUE if AES cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveAESExt()
    return boolean IMPLEMENTATION_DEFINED "Has AES Crypto instructions";
// HaveBit128PMULLExt()
// ====================
// TRUE if 128 bit form of PMULL instructions support is implemented,
// FALSE otherwise.
boolean HaveBit128PMULLExt()
    return boolean IMPLEMENTATION_DEFINED "Has 128-bit form of PMULL instructions";
// HaveSHA1Ext()
// =============
// TRUE if SHA1 cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveSHA1Ext()
    return boolean IMPLEMENTATION_DEFINED "Has SHA1 Crypto instructions";
// HaveSHA256Ext()
// ===============
// TRUE if SHA256 cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveSHA256Ext()
    return boolean IMPLEMENTATION_DEFINED "Has SHA256 Crypto instructions";
// HaveSHA3Ext()
// =============
// TRUE if SHA3 cryptographic instructions support is implemented,
// and when SHA1 and SHA2 basic cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveSHA3Ext()
    if !HasArchVersion(ARMv8p2) || !(HaveSHA1Ext() && HaveSHA256Ext()) then
        return FALSE;
    return boolean IMPLEMENTATION_DEFINED "Has SHA3 Crypto instructions";
// HaveSHA512Ext()
// ===============
// TRUE if SHA512 cryptographic instructions support is implemented,
// and when SHA1 and SHA2 basic cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveSHA512Ext()
    if !HasArchVersion(ARMv8p2) || !(HaveSHA1Ext() && HaveSHA256Ext()) then
        return FALSE;
    return boolean IMPLEMENTATION_DEFINED "Has SHA512 Crypto instructions";
// HaveSM3Ext()
// ============
// TRUE if SM3 cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveSM3Ext()
    if !HasArchVersion(ARMv8p2) then
        return FALSE;
    return boolean IMPLEMENTATION_DEFINED "Has SM3 Crypto instructions";
// HaveSM4Ext()
// ============
// TRUE if SM4 cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveSM4Ext()
    if !HasArchVersion(ARMv8p2) then
        return FALSE;
    return boolean IMPLEMENTATION_DEFINED "Has SM4 Crypto instructions";
// ROL()
// =====
// Rotate x left by 'shift' bits, implemented as a right rotation by N-shift.
bits(N) ROL(bits(N) x, integer shift)
    assert shift >= 0 && shift <= N;
    if (shift == 0) then
        return x;
    return ROR(x, N-shift);
// SHA256hash()
// ============
// Perform four rounds of the SHA256 compression on state (X, Y) with
// schedule words W; return the X half if part1, otherwise the Y half.
bits(128) SHA256hash(bits (128) X, bits(128) Y, bits(128) W, boolean part1)
    bits(32) chs, maj, t;
    for e = 0 to 3
        chs = SHAchoose(Y<31:0>, Y<63:32>, Y<95:64>);
        maj = SHAmajority(X<31:0>, X<63:32>, X<95:64>);
        t = Y<127:96> + SHAhashSIGMA1(Y<31:0>) + chs + Elem[W, e, 32];
        X<127:96> = t + X<127:96>;
        Y<127:96> = t + SHAhashSIGMA0(X<31:0>) + maj;
        <Y, X> = ROL(Y : X, 32);
    return (if part1 then X else Y);
// SHAchoose()
// ===========
bits(32) SHAchoose(bits(32) x, bits(32) y, bits(32) z)
    return (((y EOR z) AND x) EOR z);
// SHAhashSIGMA0()
// ===============
bits(32) SHAhashSIGMA0(bits(32) x)
    return ROR(x, 2) EOR ROR(x, 13) EOR ROR(x, 22);
// SHAhashSIGMA1()
// ===============
bits(32) SHAhashSIGMA1(bits(32) x)
    return ROR(x, 6) EOR ROR(x, 11) EOR ROR(x, 25);
// SHAmajority()
// =============
bits(32) SHAmajority(bits(32) x, bits(32) y, bits(32) z)
    return ((x AND y) OR ((x OR y) AND z));
// SHAparity()
// ===========
bits(32) SHAparity(bits(32) x, bits(32) y, bits(32) z)
    return (x EOR y EOR z);
// Sbox()
// ======
// Used in SM4E crypto instruction
bits(8) Sbox(bits(8) sboxin)
    bits(8) sboxout;
    bits(2048) sboxstring = 0xd690e9fecce13db716b614c228fb2c052b679a762abe04c3aa441326498606999c4250f491ef987a33540b43edcfac62e4b31ca9c908e89580df94fa758f3fa64707a7fcf37317ba83593c19e6854fa8686b81b27164da8bf8eb0f4b70569d351e240e5e6358d1a225227c3b01217887d40046579fd327524c3602e7a0c4c89eeabf8ad240c738b5a3f7f2cef96115a1e0ae5da49b341a55ad933230f58cb1e31df6e22e8266ca60c02923ab0d534e6fd5db3745defd8e2f03ff6a726d6c5b518d1baf92bbddbc7f11d95c411f105ad80ac13188a5cd7bbd2d74d012b8e5b4b08969974a0c96777e65b9f109c56ec68418f07dec3adc4d2079ee5f3ed7cb3948<2047:0>;
    sboxout = sboxstring<(255-UInt(sboxin))*8+7:(255-UInt(sboxin))*8>;
    return sboxout;
// Clear the global Exclusives monitors for all PEs EXCEPT processorid if they
// record any part of the physical address region of size bytes starting at paddress.
// It is IMPLEMENTATION DEFINED whether the global Exclusives monitor for processorid
// is also cleared if it records any part of the address region.
ClearExclusiveByAddress(FullAddress paddress, integer processorid, integer size);
// Clear the local Exclusives monitor for the specified processorid.
ClearExclusiveLocal(integer processorid);
// Sbox()
// ======
// Used in SM4E crypto instruction
// ClearExclusiveMonitors()
// ========================
bits(8)// Clear the local Exclusives monitor for the executing PE. Sbox(bits(8) sboxin)
bits(8) sboxout;
bits(2048) sboxstring = 0xd690e9fecce13db716b614c228fb2c052b679a762abe04c3aa441326498606999c4250f491ef987a33540b43edcfac62e4b31ca9c908e89580df94fa758f3fa64707a7fcf37317ba83593c19e6854fa8686b81b27164da8bf8eb0f4b70569d351e240e5e6358d1a225227c3b01217887d40046579fd327524c3602e7a0c4c89eeabf8ad240c738b5a3f7f2cef96115a1e0ae5da49b341a55ad933230f58cb1e31df6e22e8266ca60c02923ab0d534e6fd5db3745defd8e2f03ff6a726d6c5b518d1baf92bbddbc7f11d95c411f105ad80ac13188a5cd7bbd2d74d012b8e5b4b08969974a0c96777e65b9f109c56ec68418f07dec3adc4d2079ee5f3ed7cb3948<2047:0>;
sboxout = sboxstring<(255-ClearExclusiveMonitors()UIntClearExclusiveLocal(sboxin))*8+7:(255-(UIntProcessorID(sboxin))*8>;
return sboxout;());
// Clear the global Exclusives monitors for all PEs EXCEPT processorid if they
// record any part of the physical address region of size bytes starting at paddress.
// It is IMPLEMENTATION DEFINED whether the global Exclusives monitor for processorid
// is also cleared if it records any part of the address region.// Returns '0' to indicate success if the last memory write by this PE was to
// the same physical address region endorsed by ExclusiveMonitorsPass().
// Returns '1' to indicate failure if address translation resulted in a different
// physical address.
bit
ClearExclusiveByAddress(ExclusiveMonitorsStatus();FullAddress paddress, integer processorid, integer size);
// Clear the local Exclusives monitor for the specified processorid.// Return TRUE if the global Exclusives monitor for processorid includes all of
// the physical address region of size bytes starting at paddress.
boolean
ClearExclusiveLocal(integer processorid);IsExclusiveGlobal(FullAddress paddress, integer processorid, integer size);
// ClearExclusiveMonitors()
// ========================
// Clear the local Exclusives monitor for the executing PE.// Return TRUE if the local Exclusives monitor for processorid includes all of
// the physical address region of size bytes starting at paddress.
boolean
ClearExclusiveMonitors()IsExclusiveLocal(
ClearExclusiveLocalFullAddress(ProcessorID());paddress, integer processorid, integer size);
// Returns '0' to indicate success if the last memory write by this PE was to
// the same physical address region endorsed by ExclusiveMonitorsPass().
// Returns '1' to indicate failure if address translation resulted in a different
// physical address.
bit// Record the physical address region of size bytes starting at paddress in
// the global Exclusives monitor for processorid. ExclusiveMonitorsStatus();MarkExclusiveGlobal(FullAddress paddress, integer processorid, integer size);
// Return TRUE if the global Exclusives monitor for processorid includes all of
// the physical address region of size bytes starting at paddress.
boolean// Record the physical address region of size bytes starting at paddress in
// the local Exclusives monitor for processorid. IsExclusiveGlobal(MarkExclusiveLocal(FullAddress paddress, integer processorid, integer size);
// Return TRUE if the local Exclusives monitor for processorid includes all of
// the physical address region of size bytes starting at paddress.
boolean// Return the ID of the currently executing PE.
integer IsExclusiveLocal(ProcessorID();FullAddress paddress, integer processorid, integer size);
// Record the physical address region of size bytes starting at paddress in
// the global Exclusives monitor for processorid.// AArch32.HaveHPDExt()
// ====================
boolean
MarkExclusiveGlobal(AArch32.HaveHPDExt()
return(ARMv8p2FullAddressHasArchVersion paddress, integer processorid, integer size););
// Record the physical address region of size bytes starting at paddress in
// the local Exclusives monitor for processorid.// AArch64.HaveHPDExt()
// ====================
boolean
MarkExclusiveLocal(AArch64.HaveHPDExt()
return(ARMv8p1FullAddressHasArchVersion paddress, integer processorid, integer size););
// Return the ID of the currently executing PE.
integer// Have52BitIPAAndPASpaceExt()
// ===========================
// Returns TRUE if 52-bit IPA and PA extension support
// is implemented, and FALSE otherwise.
boolean ProcessorID();Have52BitIPAAndPASpaceExt()
return (HasArchVersion(ARMv8p7) &&
boolean IMPLEMENTATION_DEFINED "Has 52-bit IPA and PA support" &&
Have52BitVAExt() && Have52BitPAExt());
// AArch32.HaveHPDExt()
// ====================
// Have52BitPAExt()
// ================
// Returns TRUE if Large Physical Address extension
// support is implemented and FALSE otherwise.
boolean AArch32.HaveHPDExt()
Have52BitPAExt()
return HasArchVersion(ARMv8p2);) && boolean IMPLEMENTATION_DEFINED "Has large 52-bit PA/IPA support";
// AArch64.HaveHPDExt()
// ====================
// Have52BitVAExt()
// ================
// Returns TRUE if Large Virtual Address extension
// support is implemented and FALSE otherwise.
boolean AArch64.HaveHPDExt()
Have52BitVAExt()
return HasArchVersion(ARMv8p1ARMv8p2);) && boolean IMPLEMENTATION_DEFINED "Has large 52-bit VA support";
// Have52BitIPAAndPASpaceExt()
// ===========================
// Returns TRUE if 52-bit IPA and PA extension support
// is implemented, and FALSE otherwise.
// HaveAArch32BF16Ext()
// ====================
// Returns TRUE if AArch32 BFloat16 instruction support is implemented, and FALSE otherwise.
boolean Have52BitIPAAndPASpaceExt()
return (HaveAArch32BF16Ext()
returnHasArchVersion(ARMv8p7ARMv8p2) &&
boolean IMPLEMENTATION_DEFINED "Has 52-bit IPA and PA support" &&
Have52BitVAExt() && Have52BitPAExt());) && boolean IMPLEMENTATION_DEFINED "Has AArch32 BFloat16 extension";
// Have52BitPAExt()
// ================
// Returns TRUE if Large Physical Address extension
// support is implemented and FALSE otherwise.
// HaveAArch32Int8MatMulExt()
// ==========================
// Returns TRUE if AArch32 8-bit integer matrix multiply instruction support
// implemented, and FALSE otherwise.
boolean Have52BitPAExt()
HaveAArch32Int8MatMulExt()
return HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has large 52-bit PA/IPA support";) && boolean IMPLEMENTATION_DEFINED "Has AArch32 Int8 Mat Mul extension";
// Have52BitVAExt()
// ================
// Returns TRUE if Large Virtual Address extension
// support is implemented and FALSE otherwise.
// HaveAltFP()
// ===========
// Returns TRUE if alternative Floating-point extension support
// is implemented, and FALSE otherwise.
boolean Have52BitVAExt()
HaveAltFP()
return HasArchVersion(ARMv8p2ARMv8p7) && boolean IMPLEMENTATION_DEFINED "Has large 52-bit VA support";);
// HaveAArch32BF16Ext()
// ====================
// Returns TRUE if AArch32 BFloat16 instruction support is implemented, and FALSE otherwise.
// HaveAtomicExt()
// ===============
boolean HaveAArch32BF16Ext()
HaveAtomicExt()
return HasArchVersion(ARMv8p2ARMv8p1) && boolean IMPLEMENTATION_DEFINED "Has AArch32 BFloat16 extension";);
// HaveAArch32Int8MatMulExt()
// ==========================
// Returns TRUE if AArch32 8-bit integer matrix multiply instruction support
// implemented, and FALSE otherwise.
// HaveBF16Ext()
// =============
// Returns TRUE if AArch64 BFloat16 instruction support is implemented, and FALSE otherwise.
boolean HaveAArch32Int8MatMulExt()
HaveBF16Ext()
return HasArchVersion(ARMv8p6) || (HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has AArch32 Int8 Mat Mul extension";) && boolean IMPLEMENTATION_DEFINED "Has AArch64 BFloat16 extension");
// HaveAltFP()
// ===========
// Returns TRUE if alternative Floating-point extension support
// is implemented, and FALSE otherwise.
// HaveBTIExt()
// ============
// Returns TRUE if support for Branch Target Indentification is implemented.
boolean HaveAltFP()
HaveBTIExt()
return HasArchVersion(ARMv8p7ARMv8p5);
// HaveAtomicExt()
// ===============
// HaveBlockBBM()
// ==============
// Returns TRUE if support for changing block size without requring break-before-make is implemented.
boolean HaveAtomicExt()
HaveBlockBBM()
return HasArchVersion(ARMv8p1ARMv8p4);
// HaveBF16Ext()
// =============
// Returns TRUE if AArch64 BFloat16 instruction support is implemented, and FALSE otherwise.
// HaveCommonNotPrivateTransExt()
// ==============================
boolean HaveBF16Ext()
HaveCommonNotPrivateTransExt()
return HasArchVersion(ARMv8p6) || (HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has AArch64 BFloat16 extension"););
// HaveBTIExt()
// HaveDGHExt()
// ============
// Returns TRUE if support for Branch Target Indentification is implemented.
// Returns TRUE if Data Gathering Hint instruction support is implemented, and FALSE otherwise.
boolean HaveBTIExt()
returnHaveDGHExt()
return boolean IMPLEMENTATION_DEFINED "Has AArch64 DGH extension"; HasArchVersion(ARMv8p5);
// HaveBlockBBM()
// ==============
// Returns TRUE if support for changing block size without requring break-before-make is implemented.
// HaveDITExt()
// ============
boolean HaveBlockBBM()
HaveDITExt()
return HasArchVersion(ARMv8p4);
// HaveCommonNotPrivateTransExt()
// ==============================
// HaveDOTPExt()
// =============
// Returns TRUE if Dot Product feature support is implemented, and FALSE otherwise.
boolean HaveCommonNotPrivateTransExt()
HaveDOTPExt()
return HasArchVersion(ARMv8p4) || (HasArchVersion(ARMv8p2);) && boolean IMPLEMENTATION_DEFINED "Has Dot Product extension");
// HaveDGHExt()
// ============
// Returns TRUE if Data Gathering Hint instruction support is implemented, and FALSE otherwise.
// HaveDoPD()
// ==========
// Returns TRUE if Debug Over Power Down extension
// support is implemented and FALSE otherwise.
boolean HaveDGHExt()
return boolean IMPLEMENTATION_DEFINED "Has AArch64 DGH extension";HaveDoPD()
returnHasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has DoPD extension";
// HaveDITExt()
// ============
// HaveDoubleFaultExt()
// ====================
boolean HaveDITExt()
returnHaveDoubleFaultExt()
return ( HasArchVersion(ARMv8p4);) &&HaveEL(EL3) && !ELUsingAArch32(EL3) && HaveIESB());
// HaveDOTPExt()
// =============
// Returns TRUE if Dot Product feature support is implemented, and FALSE otherwise.
// HaveDoubleLock()
// ================
// Returns TRUE if support for the OS Double Lock is implemented.
boolean HaveDOTPExt()
returnHaveDoubleLock()
return ! HasArchVersion(ARMv8p4) || () || boolean IMPLEMENTATION_DEFINED "OS Double Lock is implemented";HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has Dot Product extension");
// HaveDoPD()
// ==========
// Returns TRUE if Debug Over Power Down extension
// support is implemented and FALSE otherwise.
// HaveE0PDExt()
// =============
// Returns TRUE if support for constant fault times for unprivileged accesses
// to the memory map is implemented.
boolean HaveDoPD()
HaveE0PDExt()
return HasArchVersion(ARMv8p2ARMv8p5) && boolean IMPLEMENTATION_DEFINED "Has DoPD extension";);
// HaveDoubleFaultExt()
// ====================
// HaveECVExt()
// ============
// Returns TRUE if Enhanced Counter Virtualization extension
// support is implemented, and FALSE otherwise.
boolean HaveDoubleFaultExt()
return (HaveECVExt()
returnHasArchVersion(ARMv8p4ARMv8p6) && HaveEL(EL3) && !ELUsingAArch32(EL3) && HaveIESB()););
// HaveDoubleLock()
// ================
// Returns TRUE if support for the OS Double Lock is implemented.
// HaveEMPAMExt()
// ==============
// Returns TRUE if Enhanced MPAM is implemented, and FALSE otherwise.
boolean HaveDoubleLock()
return !HaveEMPAMExt()
return (HasArchVersion() &&
HaveMPAMExtARMv8p4ARMv8p6) || boolean IMPLEMENTATION_DEFINED "OS Double Lock is implemented";() &&
boolean IMPLEMENTATION_DEFINED "Has enhanced MPAM extension");
// HaveE0PDExt()
// =============
// Returns TRUE if support for constant fault times for unprivileged accesses
// to the memory map is implemented.
// HaveExtendedCacheSets()
// =======================
boolean HaveE0PDExt()
HaveExtendedCacheSets()
return HasArchVersion(ARMv8p5ARMv8p3);
// HaveECVExt()
// ============
// Returns TRUE if Enhanced Counter Virtualization extension
// support is implemented, and FALSE otherwise.
// HaveExtendedECDebugEvents()
// ===========================
boolean HaveECVExt()
HaveExtendedECDebugEvents()
return HasArchVersion(ARMv8p6ARMv8p2);
// HaveEMPAMExt()
// ==============
// Returns TRUE if Enhanced MPAM is implemented, and FALSE otherwise.
// HaveExtendedExecuteNeverExt()
// =============================
boolean HaveEMPAMExt()
return (HaveExtendedExecuteNeverExt()
returnHasArchVersion(ARMv8p6ARMv8p2) &&
HaveMPAMExt() &&
boolean IMPLEMENTATION_DEFINED "Has enhanced MPAM extension"););
// HaveExtendedCacheSets()
// =======================
// HaveFCADDExt()
// ==============
boolean HaveExtendedCacheSets()
HaveFCADDExt()
return HasArchVersion(ARMv8p3);
// HaveExtendedECDebugEvents()
// ===========================
// HaveFGTExt()
// ============
// Returns TRUE if Fine Grained Trap is implemented, and FALSE otherwise.
boolean HaveExtendedECDebugEvents()
HaveFGTExt()
return HasArchVersion() && !ELUsingAArch32(EL2ARMv8p2ARMv8p6);
// HaveExtendedExecuteNeverExt()
// =============================
// HaveFJCVTZSExt()
// ================
boolean HaveExtendedExecuteNeverExt()
HaveFJCVTZSExt()
return HasArchVersion(ARMv8p2ARMv8p3);
// HaveFCADDExt()
// ==============
// HaveFP16MulNoRoundingToFP32Ext()
// ================================
// Returns TRUE if has FP16 multiply with no intermediate rounding accumulate to FP32 instructions,
// and FALSE otherwise
boolean HaveFCADDExt()
returnHaveFP16MulNoRoundingToFP32Ext()
if ! HaveFP16Ext() then return FALSE;
if HasArchVersion() then return TRUE;
return (HasArchVersion(ARMv8p2ARMv8p3ARMv8p4);) &&
boolean IMPLEMENTATION_DEFINED "Has accumulate FP16 product into FP32 extension");
// HaveFGTExt()
// ============
// Returns TRUE if Fine Grained Trap is implemented, and FALSE otherwise.
// HaveFeatLS64()
// ==============
// Returns TRUE if the LD64B, ST64B, ST64BV, and ST64BV0 instructions are
// supported, and FALSE otherwise.
boolean HaveFGTExt()
returnHaveFeatLS64()
return ( HasArchVersion(ARMv8p6ARMv8p7) && !ELUsingAArch32(EL2);) &&
boolean IMPLEMENTATION_DEFINED "Has Load Store 64-Byte instruction support");
// HaveFJCVTZSExt()
// ================
// HaveFeatRPRES()
// ===============
// Returns TRUE if the Reciprocal Estimate and Reciprocal Square Root Estimate
// instructions have increased precision, and FALSE otherwise.
boolean HaveFJCVTZSExt()
returnHaveFeatRPRES()
return ( HasArchVersion() &&
(boolean IMPLEMENTATION_DEFINED "Has increased Reciprocal Estimate and Square Root Estimate precision support") &&
HaveAltFPARMv8p3ARMv8p7);());
// HaveFP16MulNoRoundingToFP32Ext()
// ================================
// Returns TRUE if has FP16 multiply with no intermediate rounding accumulate to FP32 instructions,
// and FALSE otherwise
// HaveFeatWFxT()
// ==============
// Returns TRUE if WFET and WFIT instruction support is implemented,
// and FALSE otherwise.
boolean HaveFP16MulNoRoundingToFP32Ext()
if !HaveFeatWFxT()
returnHaveFP16Ext() then return FALSE;
if HasArchVersion(ARMv8p4ARMv8p7) then return TRUE;
return (HasArchVersion(ARMv8p2) &&
boolean IMPLEMENTATION_DEFINED "Has accumulate FP16 product into FP32 extension"););
// HaveFeatHCX()
// =============
// Returns TRUE if HCRX_EL2 Trap Control register is implemented,
// and FALSE otherwise.
// HaveFeatXS()
// ============
// Returns TRUE if XS attribute and the TLBI and DSB instructions with nXS qualifier
// are supported, and FALSE otherwise.
boolean HaveFeatHCX()
HaveFeatXS()
return HasArchVersion(ARMv8p7);
// HaveFeatLS64()
// ==============
// Returns TRUE if the LD64B, ST64B, ST64BV, and ST64BV0 instructions are
// supported, and FALSE otherwise.
// HaveFlagFormatExt()
// ===================
// Returns TRUE if flag format conversion instructions implemented.
boolean HaveFeatLS64()
return (HaveFlagFormatExt()
returnHasArchVersion(ARMv8p7ARMv8p5) &&
boolean IMPLEMENTATION_DEFINED "Has Load Store 64-Byte instruction support"););
// HaveFeatRPRES()
// ===============
// Returns TRUE if reciprocal estimate implements 12-bit precision
// when FPCR.AH=1, and FALSE otherwise.
// HaveFlagManipulateExt()
// =======================
// Returns TRUE if flag manipulate instructions are implemented.
boolean HaveFeatRPRES()
return (HaveFlagManipulateExt()
returnHasArchVersion(ARMv8p7ARMv8p4) &&
(boolean IMPLEMENTATION_DEFINED "Has increased Reciprocal Estimate and Square Root Estimate precision support") &&
HaveAltFP()););
// HaveFeatWFxT()
// HaveFrintExt()
// ==============
// Returns TRUE if WFET and WFIT instruction support is implemented,
// and FALSE otherwise.
// Returns TRUE if FRINT instructions are implemented.
boolean HaveFeatWFxT()
HaveFrintExt()
return HasArchVersion(ARMv8p7ARMv8p5);
// HaveFeatXS()
// ============
// Returns TRUE if XS attribute and the TLBI and DSB instructions with nXS qualifier
// are supported, and FALSE otherwise.
// HaveHCRXEL2Ext()
// ================
// Returns TRUE if HCRX_EL2 Trap Control register is implemented,
// and FALSE otherwise.
boolean HaveFeatXS()
HaveHCRXEL2Ext()
return HasArchVersion(ARMv8p7);
// HaveFlagFormatExt()
// ===================
// Returns TRUE if flag format conversion instructions implemented.
// HaveHPMDExt()
// =============
boolean HaveFlagFormatExt()
HaveHPMDExt()
return HasArchVersion(ARMv8p5ARMv8p1);
// HaveFlagManipulateExt()
// =======================
// Returns TRUE if flag manipulate instructions are implemented.
// HaveIDSExt()
// ============
// Returns TRUE if ID register handling feature is implemented.
boolean HaveFlagManipulateExt()
HaveIDSExt()
return HasArchVersion(ARMv8p4);
// HaveFrintExt()
// ==============
// Returns TRUE if FRINT instructions are implemented.
// HaveIESB()
// ==========
boolean HaveFrintExt()
returnHaveIESB()
return ( HasArchVersionHaveRASExt(ARMv8p5);() &&
boolean IMPLEMENTATION_DEFINED "Has Implicit Error Synchronization Barrier");
// HaveHPMDExt()
// =============
// HaveInt8MatMulExt()
// ===================
// Returns TRUE if AArch64 8-bit integer matrix multiply instruction support
// implemented, and FALSE otherwise.
boolean HaveHPMDExt()
HaveInt8MatMulExt()
return HasArchVersion() || (HasArchVersion(ARMv8p2ARMv8p1ARMv8p6);) && boolean IMPLEMENTATION_DEFINED "Has AArch64 Int8 Mat Mul extension");
// HaveIDSExt()
// ============
// Returns TRUE if ID register handling feature is implemented.
// HaveLSE2Ext()
// =============
// Returns TRUE if LSE2 is implemented, and FALSE otherwise.
boolean HaveIDSExt()
HaveLSE2Ext()
return HasArchVersion(ARMv8p4);
// HaveIESB()
// ==========
// HaveMPAMExt()
// =============
// Returns TRUE if MPAM is implemented, and FALSE otherwise.
boolean HaveIESB()
HaveMPAMExt()
return ((ARMv8p2HaveRASExtHasArchVersion() &&
boolean IMPLEMENTATION_DEFINED "Has Implicit Error Synchronization Barrier");) &&
boolean IMPLEMENTATION_DEFINED "Has MPAM extension");
// HaveInt8MatMulExt()
// ===================
// Returns TRUE if AArch64 8-bit integer matrix multiply instruction support
// implemented, and FALSE otherwise.
// HaveMTE2Ext()
// =============
// Returns TRUE if MTE support is beyond EL0, and FALSE otherwise.
boolean HaveInt8MatMulExt()
returnHaveMTE2Ext()
if ! HasArchVersion(ARMv8p6ARMv8p5) || (HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has AArch64 Int8 Mat Mul extension");) then
return FALSE;
return boolean IMPLEMENTATION_DEFINED "Has MTE2 extension";
// HaveLSE2Ext()
// HaveMTE3Ext()
// =============
// Returns TRUE if LSE2 is implemented, and FALSE otherwise.
// Returns TRUE if MTE Asymmetric Fault Handling support is
// implemented, and FALSE otherwise.
boolean HaveLSE2Ext()
returnHaveMTE3Ext()
return (( HasArchVersion() && HaveMTE2Ext()) || (HasArchVersion(ARMv8p5ARMv8p4ARMv8p7);) &&
boolean IMPLEMENTATION_DEFINED "Has MTE3 extension"));
// HaveMPAMExt()
// =============
// Returns TRUE if MPAM is implemented, and FALSE otherwise.
// HaveMTEExt()
// ============
// Returns TRUE if MTE implemented, and FALSE otherwise.
boolean HaveMPAMExt()
return (HaveMTEExt()
if !HasArchVersion(ARMv8p2ARMv8p5) &&
boolean IMPLEMENTATION_DEFINED "Has MPAM extension");) then
return FALSE;
return boolean IMPLEMENTATION_DEFINED "Has MTE extension";
// HaveMTE2Ext()
// =============
// Returns TRUE if MTE support is beyond EL0, and FALSE otherwise.
// HaveNV2Ext()
// ============
// Returns TRUE if Enhanced Nested Virtualization is implemented.
boolean HaveMTE2Ext()
if !HaveNV2Ext()
return (HasArchVersion() && HaveNVExtARMv8p5ARMv8p4) then
return FALSE;
return boolean IMPLEMENTATION_DEFINED "Has MTE2 extension";()
&& boolean IMPLEMENTATION_DEFINED "Has support for Enhanced Nested Virtualization");
// HaveMTE3Ext()
// =============
// Returns TRUE if MTE Asymmetric Fault Handling support is
// implemented, and FALSE otherwise.
// HaveNVExt()
// ===========
// Returns TRUE if Nested Virtualization is implemented.
boolean HaveMTE3Ext()
return ((HaveNVExt()
returnHasArchVersion(ARMv8p7ARMv8p3) && HaveMTE2Ext()) || (HasArchVersion(ARMv8p5) &&
boolean IMPLEMENTATION_DEFINED "Has MTE3 extension"));) && boolean IMPLEMENTATION_DEFINED "Has Nested Virtualization";
// HaveMTEExt()
// ============
// Returns TRUE if MTE implemented, and FALSE otherwise.
// HaveNoSecurePMUDisableOverride()
// ================================
boolean HaveMTEExt()
if !HaveNoSecurePMUDisableOverride()
returnHasArchVersion(ARMv8p5ARMv8p2) then
return FALSE;
if HaveMTE2Ext() then
return TRUE;
return boolean IMPLEMENTATION_DEFINED "Has MTE extension";);
// HaveNV2Ext()
// ============
// Returns TRUE if Enhanced Nested Virtualization is implemented.
// HaveNoninvasiveDebugAuth()
// ==========================
// Returns TRUE if the Non-invasive debug controls are implemented.
boolean HaveNV2Ext()
return (HaveNoninvasiveDebugAuth()
return !HasArchVersion(ARMv8p4) &&); HaveNVExt()
&& boolean IMPLEMENTATION_DEFINED "Has support for Enhanced Nested Virtualization");
// HaveNVExt()
// ===========
// Returns TRUE if Nested Virtualization is implemented.
// HavePAN3Ext()
// =============
// Returns TRUE if SCTLR_EL1.EPAN and SCTLR_EL2.EPAN support is implemented,
// and FALSE otherwise.
boolean HaveNVExt()
HavePAN3Ext()
return HasArchVersion() || (HasArchVersion(ARMv8p1ARMv8p3ARMv8p7) && boolean IMPLEMENTATION_DEFINED "Has Nested Virtualization";) &&
boolean IMPLEMENTATION_DEFINED "Has PAN3 extension");
// HaveNoSecurePMUDisableOverride()
// ================================
// HavePANExt()
// ============
boolean HaveNoSecurePMUDisableOverride()
HavePANExt()
return HasArchVersion(ARMv8p2ARMv8p1);
// HaveNoninvasiveDebugAuth()
// ==========================
// Returns TRUE if the Non-invasive debug controls are implemented.
// HavePMUv3p7()
// =============
// Returns TRUE if the PMUv3p7 extension is implemented, and FALSE otherwise.
boolean HaveNoninvasiveDebugAuth()
return !HavePMUv3p7()
return (HasArchVersion() && Havev85PMUARMv8p4ARMv8p7);() &&
boolean IMPLEMENTATION_DEFINED "Has PMUv3p7 extension");
// HavePAN3Ext()
// =============
// Returns TRUE if SCTLR_EL1.EPAN and SCTLR_EL2.EPAN support is implemented,
// and FALSE otherwise.
// HavePageBasedHardwareAttributes()
// =================================
boolean HavePAN3Ext()
HavePageBasedHardwareAttributes()
return HasArchVersion(ARMv8p7ARMv8p2) || (HasArchVersion(ARMv8p1) &&
boolean IMPLEMENTATION_DEFINED "Has PAN3 extension"););
// HavePANExt()
// ============
// HavePrivATExt()
// ===============
boolean HavePANExt()
HavePrivATExt()
return HasArchVersion(ARMv8p1ARMv8p2);
// HavePMUv3p7()
// =============
// Returns TRUE if the PMUv3p7 extension is implemented, and FALSE otherwise.
// HaveQRDMLAHExt()
// ================
boolean HavePMUv3p7()
return (HaveQRDMLAHExt()
returnHasArchVersion(ARMv8p7ARMv8p1) &&);
boolean HaveAccessFlagUpdateExt()
return HasArchVersion(ARMv8p1);
boolean HaveDirtyBitModifierExt()
return HasArchVersion(ARMv8p1Havev85PMU() &&
boolean IMPLEMENTATION_DEFINED "Has PMUv3p7 extension"););
// HavePageBasedHardwareAttributes()
// =================================
// HaveRASExt()
// ============
boolean HavePageBasedHardwareAttributes()
returnHaveRASExt()
return ( HasArchVersion(ARMv8p2);) ||
boolean IMPLEMENTATION_DEFINED "Has RAS extension");
// HavePrivATExt()
// ===============
// HaveRNG()
// =========
// Returns TRUE if Random Number Generator extension
// support is implemented and FALSE otherwise.
boolean HavePrivATExt()
HaveRNG()
return HasArchVersion(ARMv8p2ARMv8p5);) && boolean IMPLEMENTATION_DEFINED "Has RNG extension";
// HaveQRDMLAHExt()
// ================
// HaveSBExt()
// ===========
// Returns TRUE if support for SB is implemented, and FALSE otherwise.
boolean HaveQRDMLAHExt()
HaveSBExt()
return HasArchVersion(ARMv8p1ARMv8p5);
boolean HaveAccessFlagUpdateExt()
return HasArchVersion(ARMv8p1);
boolean HaveDirtyBitModifierExt()
return HasArchVersion(ARMv8p1);) || boolean IMPLEMENTATION_DEFINED "Has SB extension";
// HaveRASExt()
// ============
// HaveSSBSExt()
// =============
// Returns TRUE if support for SSBS is implemented, and FALSE otherwise.
boolean HaveRASExt()
return (HaveSSBSExt()
returnHasArchVersion(ARMv8p2ARMv8p5) ||
boolean IMPLEMENTATION_DEFINED "Has RAS extension");) || boolean IMPLEMENTATION_DEFINED "Has SSBS extension";
// HaveRNG()
// =========
// Returns TRUE if Random Number Generator extension
// support is implemented and FALSE otherwise.
// HaveSecureEL2Ext()
// ==================
// Returns TRUE if Secure EL2 is implemented.
boolean HaveRNG()
HaveSecureEL2Ext()
return HasArchVersion(ARMv8p5ARMv8p4) && boolean IMPLEMENTATION_DEFINED "Has RNG extension";);
// HaveSBExt()
// ===========
// Returns TRUE if support for SB is implemented, and FALSE otherwise.
// HaveSecureExtDebugView()
// ========================
// Returns TRUE if support for Secure and Non-secure views of debug peripherals is implemented.
boolean HaveSBExt()
HaveSecureExtDebugView()
return HasArchVersion(ARMv8p5ARMv8p4) || boolean IMPLEMENTATION_DEFINED "Has SB extension";);
// HaveSSBSExt()
// =============
// Returns TRUE if support for SSBS is implemented, and FALSE otherwise.
// HaveSelfHostedTrace()
// =====================
boolean HaveSSBSExt()
HaveSelfHostedTrace()
return HasArchVersion(ARMv8p5ARMv8p4) || boolean IMPLEMENTATION_DEFINED "Has SSBS extension";);
// HaveSecureEL2Ext()
// ==================
// Returns TRUE if Secure EL2 is implemented.
// HaveSmallTranslationTblExt()
// ============================
// Returns TRUE if Small Translation Table Support is implemented.
boolean HaveSecureEL2Ext()
HaveSmallTranslationTableExt()
return HasArchVersion(ARMv8p4);) && boolean IMPLEMENTATION_DEFINED "Has Small Translation Table extension";
// HaveSecureExtDebugView()
// ========================
// Returns TRUE if support for Secure and Non-secure views of debug peripherals is implemented.
// HaveStage2MemAttrControl()
// ==========================
// Returns TRUE if support for Stage2 control of memory types and cacheability attributes is implemented.
boolean HaveSecureExtDebugView()
HaveStage2MemAttrControl()
return HasArchVersion(ARMv8p4);
// HaveSelfHostedTrace()
// =====================
// HaveStatisticalProfiling()
// ==========================
// Returns TRUE if Statistical Profiling Extension is implemented,
// and FALSE otherwise.
boolean HaveSelfHostedTrace()
HaveStatisticalProfiling()
return HasArchVersion(ARMv8p4ARMv8p2);
// HaveSmallTranslationTblExt()
// ============================
// Returns TRUE if Small Translation Table Support is implemented.
// HaveStatisticalProfilingv1p1()
// ==============================
// Returns TRUE if the SPEv1p1 extension is implemented, and FALSE otherwise.
boolean HaveSmallTranslationTableExt()
returnHaveStatisticalProfilingv1p1()
return ( HasArchVersion(ARMv8p4ARMv8p3) && boolean IMPLEMENTATION_DEFINED "Has Small Translation Table extension";) &&
boolean IMPLEMENTATION_DEFINED "Has SPEv1p1 extension");
// HaveStage2MemAttrControl()
// ==========================
// Returns TRUE if support for Stage2 control of memory types and cacheability attributes is implemented.
// HaveStatisticalProfilingv1p2()
// ==============================
// Returns TRUE if the SPEv1p2 extension is implemented, and FALSE otherwise.
boolean HaveStage2MemAttrControl()
returnHaveStatisticalProfilingv1p2()
return ( HasArchVersion() && HaveStatisticalProfilingARMv8p4ARMv8p7);() &&
boolean IMPLEMENTATION_DEFINED "Has SPEv1p2 extension");
// HaveStatisticalProfiling()
// ==========================
// Returns TRUE if Statistical Profiling Extension is implemented,
// and FALSE otherwise.
// HaveTWEDExt()
// =============
// Returns TRUE if Delayed Trapping of WFE instruction support is implemented, and FALSE otherwise.
boolean HaveStatisticalProfiling()
returnHaveTWEDExt()
return boolean IMPLEMENTATION_DEFINED "Has TWED extension"; HasArchVersion(ARMv8p2);
// HaveStatisticalProfilingv1p1()
// ==============================
// Returns TRUE if the SPEv1p1 extension is implemented, and FALSE otherwise.
// HaveTraceExt()
// ==============
// Returns TRUE if Trace functionality as described by the Trace Architecture
// is implemented.
boolean HaveStatisticalProfilingv1p1()
return (HaveTraceExt()
return boolean IMPLEMENTATION_DEFINED "Has Trace Architecture functionality";HasArchVersion(ARMv8p3) &&
boolean IMPLEMENTATION_DEFINED "Has SPEv1p1 extension");
// HaveStatisticalProfilingv1p2()
// ==============================
// Returns TRUE if the SPEv1p2 extension is implemented, and FALSE otherwise.
// HaveTrapLoadStoreMultipleDeviceExt()
// ====================================
boolean HaveStatisticalProfilingv1p2()
return (HaveTrapLoadStoreMultipleDeviceExt()
returnHasArchVersion(ARMv8p7ARMv8p2) && HaveStatisticalProfiling() &&
boolean IMPLEMENTATION_DEFINED "Has SPEv1p2 extension"););
// HaveTWEDExt()
// =============
// Returns TRUE if Delayed Trapping of WFE instruction support is implemented, and FALSE otherwise.
// HaveUAOExt()
// ============
boolean HaveTWEDExt()
return boolean IMPLEMENTATION_DEFINED "Has TWED extension";HaveUAOExt()
returnHasArchVersion(ARMv8p2);
// HaveTraceExt()
// HaveV82Debug()
// ==============
// Returns TRUE if Trace functionality as described by the Trace Architecture
// is implemented.
boolean HaveTraceExt()
return boolean IMPLEMENTATION_DEFINED "Has Trace Architecture functionality";HaveV82Debug()
returnHasArchVersion(ARMv8p2);
// HaveTrapLoadStoreMultipleDeviceExt()
// ====================================
// HaveVirtHostExt()
// =================
boolean HaveTrapLoadStoreMultipleDeviceExt()
HaveVirtHostExt()
return HasArchVersion(ARMv8p2ARMv8p1);
// HaveUAOExt()
// Havev85PMU()
// ============
// Returns TRUE if v8.5-Performance Monitor Unit extension
// support is implemented, and FALSE otherwise.
boolean HaveUAOExt()
Havev85PMU()
return HasArchVersion(ARMv8p2ARMv8p5);) && boolean IMPLEMENTATION_DEFINED "Has PMUv3p5 extension";
// HaveV82Debug()
// ==============
// Havev8p4Debug()
// ===============
// Returns TRUE if support for the Debugv8p4 feature is implemented and FALSE otherwise.
boolean HaveV82Debug()
Havev8p4Debug()
return HasArchVersion(ARMv8p2ARMv8p4);
// HaveVirtHostExt()
// =================
// If SCTLR_ELx.IESB is 1 when an exception is generated to ELx, any pending Unrecoverable
// SError interrupt must be taken before executing any instructions in the exception handler.
// However, this can be before the branch to the exception handler is made.
boolean HaveVirtHostExt()
returnInsertIESBBeforeException(bits(2) el); HasArchVersion(ARMv8p1);
// BFAdd()
// =======
// Single-precision add following BFloat16 computation behaviors.
// NaN inputs produce the default NaN; results are rounded with Round to Odd via BFRound().

bits(32) BFAdd(bits(32) op1, bits(32) op2)
    bits(32) result;
    (type1,sign1,value1) = BFUnpack(op1);
    (type2,sign2,value2) = BFUnpack(op2);
    if type1 == FPType_QNaN || type2 == FPType_QNaN then
        result = FPDefaultNaN();
    else
        inf1  = (type1 == FPType_Infinity);
        inf2  = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);
        zero2 = (type2 == FPType_Zero);
        if inf1 && inf2 && sign1 == NOT(sign2) then
            result = FPDefaultNaN();
        elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '0') then
            result = FPInfinity('0');
        elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '1') then
            result = FPInfinity('1');
        elsif zero1 && zero2 && sign1 == sign2 then
            result = FPZero(sign1);
        else
            result_value = value1 + value2;
            if result_value == 0.0 then
                result = FPZero('0');   // Positive sign when Round to Odd
            else
                result = BFRound(result_value);
    return result;
// BFMatMulAdd()
// =============
// BFloat16 matrix multiply and add to single-precision matrix
// result[2, 2] = addend[2, 2] + (op1[2, 4] * op2[4, 2])

bits(N) BFMatMulAdd(bits(N) addend, bits(N) op1, bits(N) op2)
    assert N == 128;
    bits(N) result;
    bits(32) sum, prod0, prod1;
    for i = 0 to 1
        for j = 0 to 1
            sum = Elem[addend, 2*i + j, 32];
            for k = 0 to 1
                prod0 = BFMul(Elem[op1, 4*i + 2*k + 0, 16], Elem[op2, 4*j + 2*k + 0, 16]);
                prod1 = BFMul(Elem[op1, 4*i + 2*k + 1, 16], Elem[op2, 4*j + 2*k + 1, 16]);
                sum   = BFAdd(sum, BFAdd(prod0, prod1));
            Elem[result, 2*i + j, 32] = sum;
    return result;
// BFMul()
// =======
// BFloat16 widening multiply to single-precision following BFloat16
// computation behaviors.

bits(32) BFMul(bits(16) op1, bits(16) op2)
    bits(32) result;
    (type1,sign1,value1) = BFUnpack(op1);
    (type2,sign2,value2) = BFUnpack(op2);
    if type1 == FPType_QNaN || type2 == FPType_QNaN then
        result = FPDefaultNaN();
    else
        inf1  = (type1 == FPType_Infinity);
        inf2  = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);
        zero2 = (type2 == FPType_Zero);
        if (inf1 && zero2) || (zero1 && inf2) then
            result = FPDefaultNaN();
        elsif inf1 || inf2 then
            result = FPInfinity(sign1 EOR sign2);
        elsif zero1 || zero2 then
            result = FPZero(sign1 EOR sign2);
        else
            result = BFRound(value1*value2);
    return result;
// BFMulAdd()
// ==========
// Used by BFMLALB and BFMLALT instructions.

bits(N) BFMulAdd(bits(N) addend, bits(N) op1, bits(N) op2, FPCRType fpcr)
    boolean altfp = HaveAltFP() && fpcr.AH == '1';  // When TRUE:
    boolean fpexc = !altfp;                         // Do not generate floating point exceptions
    if altfp then fpcr.<FIZ,FZ> = '11';             // Flush denormal input and output to zero
    if altfp then fpcr.RMode = '00';                // Use RNE rounding mode
    return FPMulAdd(addend, op1, op2, fpcr, fpexc);
// BFRound()
// =========
// Converts a real number OP into a single-precision value using the
// Round to Odd rounding mode and following BFloat16 computation behaviors.

bits(32) BFRound(real op)
    assert op != 0.0;
    bits(32) result;
    // Format parameters - minimum exponent, numbers of exponent and fraction bits.
    minimum_exp = -126;  E = 8;  F = 23;
    // Split value into sign, unrounded mantissa and exponent.
    if op < 0.0 then
        sign = '1';  mantissa = -op;
    else
        sign = '0';  mantissa = op;
    exponent = 0;
    while mantissa < 1.0 do
        mantissa = mantissa * 2.0;  exponent = exponent - 1;
    while mantissa >= 2.0 do
        mantissa = mantissa / 2.0;  exponent = exponent + 1;
    // Fixed Flush-to-zero.
    if exponent < minimum_exp then
        return FPZero(sign);
    // Start creating the exponent value for the result. Start by biasing the actual exponent
    // so that the minimum exponent becomes 1, lower values 0 (indicating possible underflow).
    biased_exp = Max(exponent - minimum_exp + 1, 0);
    if biased_exp == 0 then mantissa = mantissa / 2.0^(minimum_exp - exponent);
    // Get the unrounded mantissa as an integer, and the "units in last place" rounding error.
    int_mant = RoundDown(mantissa * 2.0^F);  // < 2.0^F if biased_exp == 0, >= 2.0^F if not
    error = mantissa * 2.0^F - Real(int_mant);
    // Round to Odd
    if error != 0.0 then
        int_mant<0> = '1';
    // Deal with overflow and generate result.
    if biased_exp >= 2^E - 1 then
        result = FPInfinity(sign);  // Overflows generate appropriately-signed Infinity
    else
        result = sign : biased_exp<30-F:0> : int_mant<F-1:0>;
    return result;
// BFUnpack()
// ==========
// Unpacks a BFloat16 or single-precision value into its type,
// sign bit and real number that it represents.
// The real number result has the correct sign for numbers and infinities,
// is very large in magnitude for infinities, and is 0.0 for NaNs.
// (These values are chosen to simplify the description of
// comparisons and conversions.)

(FPType, bit, real) BFUnpack(bits(N) fpval)
    assert N IN {16,32};
    if N == 16 then
        sign = fpval<15>;
        exp  = fpval<14:7>;
        frac = fpval<6:0> : Zeros(16);
    else  // N == 32
        sign = fpval<31>;
        exp  = fpval<30:23>;
        frac = fpval<22:0>;
    if IsZero(exp) then
        fptype = FPType_Zero;  value = 0.0;  // Fixed Flush to Zero
    elsif IsOnes(exp) then
        if IsZero(frac) then
            fptype = FPType_Infinity;  value = 2.0^1000000;
        else  // no SNaN for BF16 arithmetic
            fptype = FPType_QNaN;  value = 0.0;
    else
        fptype = FPType_Nonzero;
        value = 2.0^(UInt(exp)-127) * (1.0 + Real(UInt(frac)) * 2.0^-23);
    if sign == '1' then value = -value;
    return (fptype, sign, value);
// FPConvertBF()
// =============
// Converts a single-precision OP to BFloat16 value with using rounding mode of
// Round to Nearest Even when executed from AArch64 state and
// FPCR.AH == '1', otherwise rounding is controlled by FPCR/FPSCR.

bits(16) FPConvertBF(bits(32) op, FPCRType fpcr, FPRounding rounding)
    bits(32) result;   // BF16 value in top 16 bits
    boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    boolean fpexc = !altfp;                          // Generate no floating-point exceptions
    if altfp then fpcr.<FIZ,FZ> = '11';              // Flush denormal input and output to zero
    if altfp then rounding = FPRounding_TIEEVEN;     // Use RNE rounding mode
    // Unpack floating-point operand, with always flush-to-zero if fpcr.AH == '1'.
    (fptype,sign,value) = FPUnpack(op, fpcr, fpexc);
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        if fpcr.DN == '1' then
            result = FPDefaultNaN();
        else
            result = FPConvertNaN(op);
        if fptype == FPType_SNaN then
            if fpexc then FPProcessException(FPExc_InvalidOp, fpcr);
    elsif fptype == FPType_Infinity then
        result = FPInfinity(sign);
    elsif fptype == FPType_Zero then
        result = FPZero(sign);
    else
        result = FPRoundCVBF(value, fpcr, rounding, fpexc);
    // Returns correctly rounded BF16 value from top 16 bits
    return result<31:16>;

// FPConvertBF()
// =============
// Converts a single-precision operand to BFloat16 value.

bits(16) FPConvertBF(bits(32) op, FPCRType fpcr)
    return FPConvertBF(op, fpcr, FPRoundingMode(fpcr));
// FPRoundCVBF()
// =============
// Converts a real number OP into a BFloat16 value using the supplied
// rounding mode RMODE. The 'fpexc' argument controls the generation of
// floating-point exceptions.

bits(32) FPRoundCVBF(real op, FPCRType fpcr, FPRounding rounding, boolean fpexc)
    boolean isbfloat16 = TRUE;
    return FPRoundBase(op, fpcr, rounding, isbfloat16, fpexc);
// FixedToFP()
// ===========
// Convert M-bit fixed point OP with FBITS fractional bits to
// N-bit precision floating point, controlled by UNSIGNED and ROUNDING.

bits(N) FixedToFP(bits(M) op, integer fbits, boolean unsigned, FPCRType fpcr, FPRounding rounding)
    assert N IN {16,32,64};
    assert M IN {16,32,64};
    bits(N) result;
    assert fbits >= 0;
    assert rounding != FPRounding_ODD;
    // Correct signed-ness
    int_operand = Int(op, unsigned);
    // Scale by fractional bits and generate a real value
    real_operand = Real(int_operand) / 2.0^fbits;
    if real_operand == 0.0 then
        result = FPZero('0');
    else
        result = FPRound(real_operand, fpcr, rounding);
    return result;
// FPAbs()
// =======
// Floating-point absolute value: clears the sign bit, except that with
// alternative floating-point behaviour (FPCR.AH == 1) NaN inputs are
// returned unchanged.

bits(N) FPAbs(bits(N) op)
    assert N IN {16,32,64};
    if !UsingAArch32() && HaveAltFP() then
        FPCRType fpcr = FPCR[];
        if fpcr.AH == '1' then
            (fptype, -, -) = FPUnpack(op, fpcr, FALSE);
            if fptype IN {FPType_SNaN, FPType_QNaN} then
                return op;   // When fpcr.AH=1, sign of NaN has no consequence
    return '0' : op<N-2:0>;
// FPAdd()
// =======
// Floating-point add with full IEEE NaN propagation and exception generation.

bits(N) FPAdd(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    rounding = FPRoundingMode(fpcr);
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1  = (type1 == FPType_Infinity);  inf2  = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);      zero2 = (type2 == FPType_Zero);
        if inf1 && inf2 && sign1 == NOT(sign2) then
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
        elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '0') then
            result = FPInfinity('0');
        elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '1') then
            result = FPInfinity('1');
        elsif zero1 && zero2 && sign1 == sign2 then
            result = FPZero(sign1);
        else
            result_value = value1 + value2;
            if result_value == 0.0 then  // Sign of exact zero result depends on rounding mode
                result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
                result = FPZero(result_sign);
            else
                result = FPRound(result_value, fpcr, rounding);
        FPProcessDenorms(type1, type2, N, fpcr);
    return result;
// FPCompare()
// ===========
// Floating-point compare returning a 4-bit NZCV condition-flag result.

bits(4) FPCompare(bits(N) op1, bits(N) op2, boolean signal_nans, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    if type1 IN {FPType_SNaN, FPType_QNaN} || type2 IN {FPType_SNaN, FPType_QNaN} then
        result = '0011';
        if type1 == FPType_SNaN || type2 == FPType_SNaN || signal_nans then
            FPProcessException(FPExc_InvalidOp, fpcr);
    else
        // All non-NaN cases can be evaluated on the values produced by FPUnpack()
        if value1 == value2 then
            result = '0110';
        elsif value1 < value2 then
            result = '1000';
        else  // value1 > value2
            result = '0010';
        FPProcessDenorms(type1, type2, N, fpcr);
    return result;
// FPCompareEQ()
// =============
// Floating-point equality compare; NaN operands compare FALSE, with
// InvalidOp raised only for signaling NaNs.

boolean FPCompareEQ(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    if type1 IN {FPType_SNaN, FPType_QNaN} || type2 IN {FPType_SNaN, FPType_QNaN} then
        result = FALSE;
        if type1 == FPType_SNaN || type2 == FPType_SNaN then
            FPProcessException(FPExc_InvalidOp, fpcr);
    else
        // All non-NaN cases can be evaluated on the values produced by FPUnpack()
        result = (value1 == value2);
        FPProcessDenorms(type1, type2, N, fpcr);
    return result;
// FPCompareGE()
// =============
// Floating-point greater-than-or-equal compare; any NaN operand compares
// FALSE and raises InvalidOp.

boolean FPCompareGE(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    if type1 IN {FPType_SNaN, FPType_QNaN} || type2 IN {FPType_SNaN, FPType_QNaN} then
        result = FALSE;
        FPProcessException(FPExc_InvalidOp, fpcr);
    else
        // All non-NaN cases can be evaluated on the values produced by FPUnpack()
        result = (value1 >= value2);
        FPProcessDenorms(type1, type2, N, fpcr);
    return result;
// FPCompareGT()
// =============
// Floating-point greater-than compare; any NaN operand compares FALSE
// and raises InvalidOp.

boolean FPCompareGT(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    if type1 IN {FPType_SNaN, FPType_QNaN} || type2 IN {FPType_SNaN, FPType_QNaN} then
        result = FALSE;
        FPProcessException(FPExc_InvalidOp, fpcr);
    else
        // All non-NaN cases can be evaluated on the values produced by FPUnpack()
        result = (value1 > value2);
        FPProcessDenorms(type1, type2, N, fpcr);
    return result;
// FPConvert()
// ===========
// Convert floating point OP with N-bit precision to M-bit precision,
// with rounding controlled by ROUNDING.
// This is used by the FP-to-FP conversion instructions and so for
// half-precision data ignores FZ16, but observes AHP.

bits(M) FPConvert(bits(N) op, FPCRType fpcr, FPRounding rounding)
    assert M IN {16,32,64};
    assert N IN {16,32,64};
    bits(M) result;
    // Unpack floating-point operand optionally with flush-to-zero.
    (fptype,sign,value) = FPUnpackCV(op, fpcr);
    alt_hp = (M == 16) && (fpcr.AHP == '1');
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        if alt_hp then
            result = FPZero(sign);
        elsif fpcr.DN == '1' then
            result = FPDefaultNaN();
        else
            result = FPConvertNaN(op);
        if fptype == FPType_SNaN || alt_hp then
            FPProcessException(FPExc_InvalidOp, fpcr);
    elsif fptype == FPType_Infinity then
        if alt_hp then
            result = sign:Ones(M-1);
            FPProcessException(FPExc_InvalidOp, fpcr);
        else
            result = FPInfinity(sign);
    elsif fptype == FPType_Zero then
        result = FPZero(sign);
    else
        result = FPRoundCV(value, fpcr, rounding);
        FPProcessDenorm(fptype, N, fpcr);
    return result;

// FPConvert()
// ===========
// Convert with the rounding mode taken from the FPCR.

bits(M) FPConvert(bits(N) op, FPCRType fpcr)
    return FPConvert(op, fpcr, FPRoundingMode(fpcr));
// FPConvertNaN()
// ==============
// Converts a NaN of one floating-point type to another

bits(M) FPConvertNaN(bits(N) op)
    assert N IN {16,32,64};
    assert M IN {16,32,64};
    bits(M) result;
    bits(51) frac;
    sign = op<N-1>;
    // Unpack payload from input NaN
    case N of
        when 64 frac = op<50:0>;
        when 32 frac = op<21:0>:Zeros(29);
        when 16 frac = op<8:0>:Zeros(42);
    // Repack payload into output NaN, while
    // converting an SNaN to a QNaN.
    case M of
        when 64 result = sign:Ones(M-52):frac;
        when 32 result = sign:Ones(M-23):frac<50:29>;
        when 16 result = sign:Ones(M-10):frac<50:42>;
    return result;
// Opaque declaration of the floating-point control register type.
type FPCRType;
// FPDecodeRM()
// ============
// Decode most common AArch32 floating-point rounding encoding.

FPRounding FPDecodeRM(bits(2) rm)
    case rm of
        when '00' result = FPRounding_TIEAWAY; // A
        when '01' result = FPRounding_TIEEVEN; // N
        when '10' result = FPRounding_POSINF;  // P
        when '11' result = FPRounding_NEGINF;  // M
    return result;
// FPDecodeRounding()
// ==================
// Decode floating-point rounding mode and common AArch64 encoding.

FPRounding FPDecodeRounding(bits(2) rmode)
    case rmode of
        when '00' return FPRounding_TIEEVEN; // N
        when '01' return FPRounding_POSINF;  // P
        when '10' return FPRounding_NEGINF;  // M
        when '11' return FPRounding_ZERO;    // Z
// FPDefaultNaN()
// ==============
// Returns the default NaN for an N-bit format; with alternative
// floating-point behaviour (FPCR.AH == 1) the sign bit is set.

bits(N) FPDefaultNaN()
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    bit sign;
    if !UsingAArch32() && HaveAltFP() then
        FPCRType fpcr = FPCR[];
        sign = if fpcr.AH == '1' then '1' else '0';
    else
        sign = '0';
    bits(E) exp  = Ones(E);
    bits(F) frac = '1':Zeros(F-1);
    return sign : exp : frac;
// FPDiv()
// =======
// Floating-point divide with IEEE NaN handling, InvalidOp for 0/0 and
// Inf/Inf, and DivideByZero for finite/0.

bits(N) FPDiv(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1  = type1 == FPType_Infinity;
        inf2  = type2 == FPType_Infinity;
        zero1 = type1 == FPType_Zero;
        zero2 = type2 == FPType_Zero;
        if (inf1 && inf2) || (zero1 && zero2) then
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
        elsif inf1 || zero2 then
            result = FPInfinity(sign1 EOR sign2);
            if !inf1 then FPProcessException(FPExc_DivideByZero, fpcr);
        elsif zero1 || inf2 then
            result = FPZero(sign1 EOR sign2);
        else
            result = FPRound(value1/value2, fpcr);
        if !zero2 then
            FPProcessDenorms(type1, type2, N, fpcr);
    return result;
// Floating-point exception kinds raised via FPProcessException().
enumeration FPExc {FPExc_InvalidOp, FPExc_DivideByZero, FPExc_Overflow,
                   FPExc_Underflow, FPExc_Inexact, FPExc_InputDenorm};
// FPInfinity()
// ============
// Returns an N-bit infinity with the given sign.

bits(N) FPInfinity(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    bits(E) exp  = Ones(E);
    bits(F) frac = Zeros(F);
    return sign : exp : frac;
// FPMatMulAdd()
// =============
//
// Floating point matrix multiply and add to same precision matrix
// result[2, 2] = addend[2, 2] + (op1[2, 2] * op2[2, 2])

bits(N) FPMatMulAdd(bits(N) addend, bits(N) op1, bits(N) op2, integer esize, FPCRType fpcr)
    assert N == esize * 2 * 2;
    bits(N) result;
    bits(esize) prod0, prod1, sum;
    for i = 0 to 1
        for j = 0 to 1
            sum   = Elem[addend, 2*i + j, esize];
            prod0 = FPMul(Elem[op1, 2*i + 0, esize], Elem[op2, 2*j + 0, esize], fpcr);
            prod1 = FPMul(Elem[op1, 2*i + 1, esize], Elem[op2, 2*j + 1, esize], fpcr);
            sum   = FPAdd(sum, FPAdd(prod0, prod1, fpcr), fpcr);
            Elem[result, 2*i + j, esize] = sum;
    return result;
// FPMax()
// =======
// Maximum of two operands using the standard (non-alternative) behaviour
// unless FPCR.AH selects alternative floating-point behaviour.

bits(N) FPMax(bits(N) op1, bits(N) op2, FPCRType fpcr)
    boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    return FPMax(op1, op2, fpcr, altfp);

// FPMax()
// =======
// Compare two inputs and return the larger value after rounding. The
// 'fpcr' argument supplies the FPCR control bits and 'altfp' determines
// if the function should use alternative floating-point behaviour.

bits(N) FPMax(bits(N) op1, bits(N) op2, FPCRType fpcr, boolean altfp)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    if (altfp && type1 == FPType_Zero && type2 == FPType_Zero &&
          ((sign1 == '0' && sign2 == '1') || (sign1 == '1' && sign2 == '0'))) then
        return FPZero(sign2);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr, altfp, TRUE);
    if !done then
        if value1 > value2 then
            (fptype,sign,value) = (type1,sign1,value1);
        else
            (fptype,sign,value) = (type2,sign2,value2);
        if fptype == FPType_Infinity then
            result = FPInfinity(sign);
        elsif fptype == FPType_Zero then
            sign = sign1 AND sign2;   // Use most positive sign
            result = FPZero(sign);
        else
            // The use of FPRound() covers the case where there is a trapped underflow exception
            // for a denormalized number even though the result is exact.
            rounding = FPRoundingMode(fpcr);
            if altfp then   // Denormal output is not flushed to zero
                fpcr.FZ = '0';
                fpcr.FZ16 = '0';
            result = FPRound(value, fpcr, rounding, TRUE);
        FPProcessDenorms(type1, type2, N, fpcr);
    return result;
// FPInfinity()
// ============
// Return the N-bit encoding of an infinity with the given sign:
// all-ones exponent and an all-zeros fraction.
bits(N) FPInfinity(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    bits(E) exp  = Ones(E);
    bits(F) frac = Zeros(F);
    return sign : exp : frac;
// FPMatMulAdd()
// =============
//
// Floating point matrix multiply and add to same precision matrix
// result[2, 2] = addend[2, 2] + (op1[2, 2] * op2[2, 2])
bits(N) FPMatMulAdd(bits(N) addend, bits(N) op1, bits(N) op2, integer esize, FPCRType fpcr)
    assert N == esize * 2 * 2;
    bits(N) result;
    bits(esize) prod0, prod1, sum;
    for i = 0 to 1
        for j = 0 to 1
            sum   = Elem[addend, 2*i + j, esize];
            prod0 = FPMul(Elem[op1, 2*i + 0, esize],
                          Elem[op2, 2*j + 0, esize], fpcr);
            prod1 = FPMul(Elem[op1, 2*i + 1, esize],
                          Elem[op2, 2*j + 1, esize], fpcr);
            sum   = FPAdd(sum, FPAdd(prod0, prod1, fpcr), fpcr);
            Elem[result, 2*i + j, esize] = sum;
    return result;

// FPMaxNum()
// ==========
// IEEE 754 maxNum: a single quiet NaN operand is treated as -Infinity so the
// numeric operand wins; the comparison itself is delegated to FPMax().
bits(N) FPMaxNum(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,-,-) = FPUnpack(op1, fpcr);
    (type2,-,-) = FPUnpack(op2, fpcr);
    boolean type1_nan = type1 IN {FPType_QNaN, FPType_SNaN};
    boolean type2_nan = type2 IN {FPType_QNaN, FPType_SNaN};
    boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    if !(altfp && type1_nan && type2_nan) then
        // Treat a single quiet-NaN as -Infinity.
        if type1 == FPType_QNaN && type2 != FPType_QNaN then
            op1 = FPInfinity('1');
        elsif type1 != FPType_QNaN && type2 == FPType_QNaN then
            op2 = FPInfinity('1');
    altfmaxfmin = FALSE;    // Restrict use of FMAX/FMIN NaN propagation rules
    result = FPMax(op1, op2, fpcr, altfmaxfmin);
    return result;
// IsMerging()
// ===========
// Returns TRUE if the output elements other than the lowest are taken from
// the destination register.
boolean IsMerging(FPCRType fpcr)
    boolean merge = HaveAltFP() && !UsingAArch32() && fpcr.NEP == '1';
    return merge;
// FPMax()
// =======
// Compare two inputs and return the larger value after rounding. The
// 'fpcr' argument supplies the FPCR control bits and 'altfp' determines
// if the function should use alternative floating-point behaviour.
bits(N) FPMax(bits(N) op1, bits(N) op2, FPCRType fpcr, boolean altfp)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    // Alternative behaviour: for zeros of opposite sign, return a zero with
    // the sign of op2 rather than applying the sign-merging rule below.
    if (altfp && type1 == FPType_Zero && type2 == FPType_Zero &&
          ((sign1 == '0' && sign2 == '1') || (sign1 == '1' && sign2 == '0'))) then
        return FPZero(sign2);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr, altfp, TRUE);
    if !done then
        if value1 > value2 then
            (fptype,sign,value) = (type1,sign1,value1);
        else
            (fptype,sign,value) = (type2,sign2,value2);
        if fptype == FPType_Infinity then
            result = FPInfinity(sign);
        elsif fptype == FPType_Zero then
            sign = sign1 AND sign2;        // Use most positive sign
            result = FPZero(sign);
        else
            // The use of FPRound() covers the case where there is a trapped underflow exception
            // for a denormalized number even though the result is exact.
            rounding = FPRoundingMode(fpcr);
            if altfp then                  // Denormal output is not flushed to zero
                fpcr.FZ = '0';
                fpcr.FZ16 = '0';
            result = FPRound(value, fpcr, rounding, TRUE);
        FPProcessDenorms(type1, type2, N, fpcr);
    return result;
// FPMaxNormal()
// =============
// Return the largest finite normal number of the given sign:
// maximum exponent minus one, all-ones fraction.
bits(N) FPMaxNormal(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    exp  = Ones(E-1):'0';
    frac = Ones(F);
    return sign : exp : frac;

// FPMin()
// =======
// Three-argument form: derives the alternative floating-point behaviour flag
// from FPCR.AH and delegates to the four-argument FPMin().
bits(N) FPMin(bits(N) op1, bits(N) op2, FPCRType fpcr)
    boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    return FPMin(op1, op2, fpcr, altfp);
// FPMin()
// =======
// Compare two operands and return the smaller operand after rounding. The
// 'fpcr' argument supplies the FPCR control bits and 'altfp' determines
// if the function should use alternative behaviour.
bits(N) FPMin(bits(N) op1, bits(N) op2, FPCRType fpcr, boolean altfp)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    // Alternative behaviour: for zeros of opposite sign, return a zero with
    // the sign of op2 rather than applying the sign-merging rule below.
    if (altfp && type1 == FPType_Zero && type2 == FPType_Zero &&
          ((sign1 == '0' && sign2 == '1') || (sign1 == '1' && sign2 == '0'))) then
        return FPZero(sign2);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr, altfp, TRUE);
    if !done then
        if value1 < value2 then
            (fptype,sign,value) = (type1,sign1,value1);
        else
            (fptype,sign,value) = (type2,sign2,value2);
        if fptype == FPType_Infinity then
            result = FPInfinity(sign);
        elsif fptype == FPType_Zero then
            sign = sign1 OR sign2;         // Use most negative sign
            result = FPZero(sign);
        else
            // The use of FPRound() covers the case where there is a trapped underflow exception
            // for a denormalized number even though the result is exact.
            rounding = FPRoundingMode(fpcr);
            if altfp then                  // Denormal output is not flushed to zero
                fpcr.FZ = '0';
                fpcr.FZ16 = '0';
            result = FPRound(value, fpcr, rounding, TRUE);
        FPProcessDenorms(type1, type2, N, fpcr);
    return result;
// FPMinNum()
// ==========
// IEEE 754 minNum: a single quiet NaN operand is treated as +Infinity so the
// numeric operand wins; the comparison itself is delegated to FPMin().
bits(N) FPMinNum(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,-,-) = FPUnpack(op1, fpcr);
    (type2,-,-) = FPUnpack(op2, fpcr);
    boolean type1_nan = type1 IN {FPType_QNaN, FPType_SNaN};
    boolean type2_nan = type2 IN {FPType_QNaN, FPType_SNaN};
    boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    if !(altfp && type1_nan && type2_nan) then
        // Treat a single quiet-NaN as +Infinity.
        if type1 == FPType_QNaN && type2 != FPType_QNaN then
            op1 = FPInfinity('0');
        elsif type1 != FPType_QNaN && type2 == FPType_QNaN then
            op2 = FPInfinity('0');
    altfmaxfmin = FALSE;    // Restrict use of FMAX/FMIN NaN propagation rules
    result = FPMin(op1, op2, fpcr, altfmaxfmin);
    return result;
// FPMul()
// =======
// Floating-point multiply of op1 and op2 with NaN processing, special-case
// handling for infinities and zeros, and rounding controlled by 'fpcr'.
bits(N) FPMul(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1  = (type1 == FPType_Infinity);
        inf2  = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);
        zero2 = (type2 == FPType_Zero);
        if (inf1 && zero2) || (zero1 && inf2) then
            // Infinity * zero is an Invalid Operation.
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
        elsif inf1 || inf2 then
            result = FPInfinity(sign1 EOR sign2);
        elsif zero1 || zero2 then
            result = FPZero(sign1 EOR sign2);
        else
            result = FPRound(value1*value2, fpcr);
        FPProcessDenorms(type1, type2, N, fpcr);
    return result;
// FPMulAdd()
// ==========
// Four-argument form: generates floating-point exceptions and delegates to
// the five-argument FPMulAdd().
bits(N) FPMulAdd(bits(N) addend, bits(N) op1, bits(N) op2, FPCRType fpcr)
    boolean fpexc = TRUE;    // Generate floating-point exceptions
    return FPMulAdd(addend, op1, op2, fpcr, fpexc);

// FPMulAdd()
// ==========
//
// Calculates addend + op1*op2 with a single rounding. The 'fpcr' argument
// supplies the FPCR control bits, and 'fpexc' controls the generation of
// floating-point exceptions.
bits(N) FPMulAdd(bits(N) addend, bits(N) op1, bits(N) op2,
                 FPCRType fpcr, boolean fpexc)
    assert N IN {16,32,64};
    (typeA,signA,valueA) = FPUnpack(addend, fpcr, fpexc);
    (type1,sign1,value1) = FPUnpack(op1, fpcr, fpexc);
    (type2,sign2,value2) = FPUnpack(op2, fpcr, fpexc);
    rounding = FPRoundingMode(fpcr);
    inf1 = (type1 == FPType_Infinity); zero1 = (type1 == FPType_Zero);
    inf2 = (type2 == FPType_Infinity); zero2 = (type2 == FPType_Zero);
    (done,result) = FPProcessNaNs3(typeA, type1, type2, addend, op1, op2, fpcr, fpexc);
    if !(HaveAltFP() && !UsingAArch32() && fpcr.AH == '1') then
        // Without alternative behaviour, a quiet-NaN addend combined with an
        // invalid product still signals Invalid Operation.
        if typeA == FPType_QNaN && ((inf1 && zero2) || (zero1 && inf2)) then
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
    if !done then
        infA = (typeA == FPType_Infinity); zeroA = (typeA == FPType_Zero);
        // Determine sign and type product will have if it does not cause an
        // Invalid Operation.
        signP = sign1 EOR sign2;
        infP  = inf1 || inf2;
        zeroP = zero1 || zero2;
        // Non SNaN-generated Invalid Operation cases are multiplies of zero
        // by infinity and additions of opposite-signed infinities.
        invalidop = (inf1 && zero2) || (zero1 && inf2) || (infA && infP && signA != signP);
        if invalidop then
            result = FPDefaultNaN();
            if fpexc then FPProcessException(FPExc_InvalidOp, fpcr);
        // Other cases involving infinities produce an infinity of the same sign.
        elsif (infA && signA == '0') || (infP && signP == '0') then
            result = FPInfinity('0');
        elsif (infA && signA == '1') || (infP && signP == '1') then
            result = FPInfinity('1');
        // Cases where the result is exactly zero and its sign is not determined by the
        // rounding mode are additions of same-signed zeros.
        elsif zeroA && zeroP && signA == signP then
            result = FPZero(signA);
        // Otherwise calculate numerical result and round it.
        else
            result_value = valueA + (value1 * value2);
            if result_value == 0.0 then    // Sign of exact zero result depends on rounding mode
                result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
                result = FPZero(result_sign);
            else
                result = FPRound(result_value, fpcr, rounding, fpexc);
        if !invalidop && fpexc then
            FPProcessDenorms3(typeA, type1, type2, N, fpcr);
    return result;
// FPMulAddH()
// ===========
// Calculates addend + op1*op2.
// 'addend' is full precision (N == 32); 'op1' and 'op2' are half precision.
bits(N) FPMulAddH(bits(N) addend, bits(N DIV 2) op1, bits(N DIV 2) op2, FPCRType fpcr)
    assert N == 32;
    rounding = FPRoundingMode(fpcr);
    (typeA,signA,valueA) = FPUnpack(addend, fpcr);
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    inf1 = (type1 == FPType_Infinity); zero1 = (type1 == FPType_Zero);
    inf2 = (type2 == FPType_Infinity); zero2 = (type2 == FPType_Zero);
    (done,result) = FPProcessNaNs3H(typeA, type1, type2, addend, op1, op2, fpcr);
    if !(HaveAltFP() && !UsingAArch32() && fpcr.AH == '1') then
        if typeA == FPType_QNaN && ((inf1 && zero2) || (zero1 && inf2)) then
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
    if !done then
        infA = (typeA == FPType_Infinity); zeroA = (typeA == FPType_Zero);
        // Determine sign and type product will have if it does not cause an
        // Invalid Operation.
        signP = sign1 EOR sign2;
        infP  = inf1 || inf2;
        zeroP = zero1 || zero2;
        // Non SNaN-generated Invalid Operation cases are multiplies of zero by infinity and
        // additions of opposite-signed infinities.
        invalidop = (inf1 && zero2) || (zero1 && inf2) || (infA && infP && signA != signP);
        if invalidop then
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
        // Other cases involving infinities produce an infinity of the same sign.
        elsif (infA && signA == '0') || (infP && signP == '0') then
            result = FPInfinity('0');
        elsif (infA && signA == '1') || (infP && signP == '1') then
            result = FPInfinity('1');
        // Cases where the result is exactly zero and its sign is not determined by the
        // rounding mode are additions of same-signed zeros.
        elsif zeroA && zeroP && signA == signP then
            result = FPZero(signA);
        // Otherwise calculate numerical result and round it.
        else
            result_value = valueA + (value1 * value2);
            if result_value == 0.0 then    // Sign of exact zero result depends on rounding mode
                result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
                result = FPZero(result_sign);
            else
                result = FPRound(result_value, fpcr);
        if !invalidop then
            // Only the full-precision addend can raise Input Denormal here.
            FPProcessDenorm(typeA, N, fpcr);
    return result;
// FPProcessNaNs3H()
// =================
// NaN processing for a three-operand operation where 'op1' is full precision
// and 'op2'/'op3' are half precision; half-precision NaNs are converted to
// full precision before being returned.
(boolean, bits(N)) FPProcessNaNs3H(FPType type1, FPType type2, FPType type3,
                                   bits(N) op1, bits(N DIV 2) op2, bits(N DIV 2) op3,
                                   FPCRType fpcr)
    assert N IN {32,64};
    bits(N) result;
    // When TRUE, use alternative NaN propagation rules.
    boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    boolean op1_nan = type1 IN {FPType_SNaN, FPType_QNaN};
    boolean op2_nan = type2 IN {FPType_SNaN, FPType_QNaN};
    boolean op3_nan = type3 IN {FPType_SNaN, FPType_QNaN};
    boolean fpexc = TRUE;
    if altfp then
        if (type1 == FPType_SNaN || type2 == FPType_SNaN || type3 == FPType_SNaN) then
            type_nan = FPType_SNaN;
        else
            type_nan = FPType_QNaN;
    if altfp && op1_nan && op2_nan && op3_nan then
        done = TRUE; result = FPConvertNaN(FPProcessNaN(type_nan, op2, fpcr, fpexc)); // <n> register NaN selected
    elsif altfp && op2_nan && (op1_nan || op3_nan) then
        done = TRUE; result = FPConvertNaN(FPProcessNaN(type_nan, op2, fpcr, fpexc)); // <n> register NaN selected
    elsif altfp && op3_nan && op1_nan then
        done = TRUE; result = FPConvertNaN(FPProcessNaN(type_nan, op3, fpcr, fpexc)); // <m> register NaN selected
    elsif type1 == FPType_SNaN then
        done = TRUE; result = FPProcessNaN(type1, op1, fpcr, fpexc);
    elsif type2 == FPType_SNaN then
        done = TRUE; result = FPConvertNaN(FPProcessNaN(type2, op2, fpcr, fpexc));
    elsif type3 == FPType_SNaN then
        done = TRUE; result = FPConvertNaN(FPProcessNaN(type3, op3, fpcr, fpexc));
    elsif type1 == FPType_QNaN then
        done = TRUE; result = FPProcessNaN(type1, op1, fpcr, fpexc);
    elsif type2 == FPType_QNaN then
        done = TRUE; result = FPConvertNaN(FPProcessNaN(type2, op2, fpcr, fpexc));
    elsif type3 == FPType_QNaN then
        done = TRUE; result = FPConvertNaN(FPProcessNaN(type3, op3, fpcr, fpexc));
    else
        done = FALSE; result = Zeros(); // 'Don't care' result
    return (done, result);
// FPMulX()
// ========
// Floating-point multiply-extended: like FPMul() except that
// infinity * zero returns 2.0 with the product sign instead of signalling
// an Invalid Operation.
bits(N) FPMulX(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    bits(N) result;
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1  = (type1 == FPType_Infinity);
        inf2  = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);
        zero2 = (type2 == FPType_Zero);
        if (inf1 && zero2) || (zero1 && inf2) then
            result = FPTwo(sign1 EOR sign2);
        elsif inf1 || inf2 then
            result = FPInfinity(sign1 EOR sign2);
        elsif zero1 || zero2 then
            result = FPZero(sign1 EOR sign2);
        else
            result = FPRound(value1*value2, fpcr);
        FPProcessDenorms(type1, type2, N, fpcr);
    return result;
// FPNeg()
// =======
// Invert the sign bit of 'op'. With alternative floating-point behaviour
// (FPCR.AH == 1), NaN operands are returned unchanged because the sign of a
// NaN then has no consequence.
bits(N) FPNeg(bits(N) op)
    assert N IN {16,32,64};
    if !UsingAArch32() && HaveAltFP() then
        FPCRType fpcr = FPCR[];
        if fpcr.AH == '1' then
            (fptype, -, -) = FPUnpack(op, fpcr, FALSE);
            if fptype IN {FPType_SNaN, FPType_QNaN} then
                return op;    // When fpcr.AH=1, sign of NaN has no consequence
    return NOT(op<N-1>) : op<N-2:0>;
// FPOnePointFive()
// ================
// Return the N-bit encoding of 1.5 with the given sign: biased exponent of
// zero ('0' then all-ones) and a fraction with only its top bit set.
bits(N) FPOnePointFive(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    exp  = '0':Ones(E-1);
    frac = '1':Zeros(F-1);
    result = sign : exp : frac;
    return result;
// FPProcessDenorm()
// =================
// Handles denormal input in case of single-precision or double-precision
// when using alternative floating-point mode.
FPProcessDenorm(FPType fptype, integer N, FPCRType fpcr)
    boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    if altfp && N != 16 && fptype == FPType_Denormal then
        FPProcessException(FPExc_InputDenorm, fpcr);
// FPProcessDenorms()
// ==================
// Handles denormal input in case of single-precision or double-precision
// when using alternative floating-point mode.
FPProcessDenorms(FPType type1, FPType type2, integer N, FPCRType fpcr)
    boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    if altfp && N != 16 && (type1 == FPType_Denormal || type2 == FPType_Denormal) then
        FPProcessException(FPExc_InputDenorm, fpcr);
// FPProcessDenorms3()
// ===================
// Handles denormal input in case of single-precision or double-precision
// when using alternative floating-point mode.
FPProcessDenorms3(FPType type1, FPType type2, FPType type3, integer N, FPCRType fpcr)
    boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    if altfp && N != 16 && (type1 == FPType_Denormal || type2 == FPType_Denormal ||
                            type3 == FPType_Denormal) then
        FPProcessException(FPExc_InputDenorm, fpcr);
// FPProcessException()
// ====================
//
// The 'fpcr' argument supplies FPCR control bits. Status information is
// updated directly in the FPSR where appropriate.
FPProcessException(FPExc exception, FPCRType fpcr)
    // Determine the cumulative exception bit number
    case exception of
        when FPExc_InvalidOp     cumul = 0;
        when FPExc_DivideByZero  cumul = 1;
        when FPExc_Overflow      cumul = 2;
        when FPExc_Underflow     cumul = 3;
        when FPExc_Inexact       cumul = 4;
        when FPExc_InputDenorm   cumul = 7;
    enable = cumul + 8;
    if fpcr<enable> == '1' then
        // Trapping of the exception enabled.
        // It is IMPLEMENTATION DEFINED whether the enable bit may be set at all, and
        // if so then how exceptions may be accumulated before calling FPTrappedException()
        IMPLEMENTATION_DEFINED "floating-point trap handling";
    elsif UsingAArch32() then
        // Set the cumulative exception bit
        FPSCR<cumul> = '1';
    else
        // Set the cumulative exception bit
        FPSR<cumul> = '1';
    return;
// FPProcessNaN()
// ==============
// Three-argument form: generates floating-point exceptions and delegates to
// the four-argument FPProcessNaN().
bits(N) FPProcessNaN(FPType fptype, bits(N) op, FPCRType fpcr)
    boolean fpexc = TRUE;    // Generate floating-point exceptions
    return FPProcessNaN(fptype, op, fpcr, fpexc);

// FPProcessNaN()
// ==============
// Handle NaN input operands, returning the operand or default NaN value
// if fpcr.DN is selected. The 'fpcr' argument supplies the FPCR control bits.
// The 'fpexc' argument controls the generation of exceptions, regardless of
// whether 'fptype' is a signalling NaN or a quiet NaN.
bits(N) FPProcessNaN(FPType fptype, bits(N) op, FPCRType fpcr, boolean fpexc)
    assert N IN {16,32,64};
    assert fptype IN {FPType_QNaN, FPType_SNaN};
    case N of
        when 16 topfrac =  9;
        when 32 topfrac = 22;
        when 64 topfrac = 51;
    result = op;
    if fptype == FPType_SNaN then
        // Quieten a signalling NaN by setting the top fraction bit.
        result<topfrac> = '1';
        if fpexc then FPProcessException(FPExc_InvalidOp, fpcr);
    if fpcr.DN == '1' then    // DefaultNaN requested
        result = FPDefaultNaN();
    return result;
// FPProcessNaNs()
// ===============
// Five-argument form: disables the FMAX/FMIN alternative rules, generates
// floating-point exceptions, and delegates to the seven-argument form.
(boolean, bits(N)) FPProcessNaNs(FPType type1, FPType type2, bits(N) op1,
                                 bits(N) op2, FPCRType fpcr)
    boolean altfmaxfmin = FALSE;    // Do not use altfp mode for FMIN, FMAX and variants
    boolean fpexc = TRUE;           // Generate floating-point exceptions
    return FPProcessNaNs(type1, type2, op1, op2, fpcr, altfmaxfmin, fpexc);

// FPProcessNaNs()
// ===============
//
// The boolean part of the return value says whether a NaN has been found and
// processed. The bits(N) part is only relevant if it has and supplies the
// result of the operation.
//
// The 'fpcr' argument supplies FPCR control bits and 'altfmaxfmin' controls
// alternative floating-point behaviour for FMAX, FMIN and variants. 'fpexc'
// controls the generation of floating-point exceptions. Status information
// is updated directly in the FPSR where appropriate.
(boolean, bits(N)) FPProcessNaNs(FPType type1, FPType type2, bits(N) op1, bits(N) op2,
                                 FPCRType fpcr, boolean altfmaxfmin, boolean fpexc)
    assert N IN {16,32,64};
    boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    boolean op1_nan = type1 IN {FPType_SNaN, FPType_QNaN};
    boolean op2_nan = type2 IN {FPType_SNaN, FPType_QNaN};
    boolean any_snan = type1 == FPType_SNaN || type2 == FPType_SNaN;
    FPType type_nan = if any_snan then FPType_SNaN else FPType_QNaN;
    if altfmaxfmin && (op1_nan || op2_nan) then
        FPProcessException(FPExc_InvalidOp, fpcr);
        done = TRUE; sign2 = op2<N-1>;
        result = if type2 == FPType_Zero then FPZero(sign2) else op2;
    elsif altfp && op1_nan && op2_nan then
        done = TRUE; result = FPProcessNaN(type_nan, op1, fpcr, fpexc); // <n> register NaN selected
    elsif type1 == FPType_SNaN then
        done = TRUE; result = FPProcessNaN(type1, op1, fpcr, fpexc);
    elsif type2 == FPType_SNaN then
        done = TRUE; result = FPProcessNaN(type2, op2, fpcr, fpexc);
    elsif type1 == FPType_QNaN then
        done = TRUE; result = FPProcessNaN(type1, op1, fpcr, fpexc);
    elsif type2 == FPType_QNaN then
        done = TRUE; result = FPProcessNaN(type2, op2, fpcr, fpexc);
    else
        done = FALSE; result = Zeros(); // 'Don't care' result
    return (done, result);
// FPProcessNaNs3()
// ================
// Seven-argument form: generates floating-point exceptions and delegates to
// the eight-argument FPProcessNaNs3().
(boolean, bits(N)) FPProcessNaNs3(FPType type1, FPType type2, FPType type3,
                                  bits(N) op1, bits(N) op2, bits(N) op3,
                                  FPCRType fpcr)
    boolean fpexc = TRUE;    // Generate floating-point exceptions
    return FPProcessNaNs3(type1, type2, type3, op1, op2, op3, fpcr, fpexc);
// FPProcessNaNs3()
// ================
// The boolean part of the return value says whether a NaN has been found and
// processed. The bits(N) part is only relevant if it has and supplies the
// result of the operation.
//
// The 'fpcr' argument supplies FPCR control bits and 'fpexc' controls the
// generation of floating-point exceptions. Status information is updated
// directly in the FPSR where appropriate.
(boolean, bits(N)) FPExc_DivideByZero cumul = 1;
whenFPProcessNaNs3( FPExc_OverflowFPType cumul = 2;
whentype1, FPExc_UnderflowFPType cumul = 3;
whentype2, FPExc_InexactFPType cumul = 4;
whentype3,
bits(N) op1, bits(N) op2, bits(N) op3, FPExc_InputDenormFPCRType cumul = 7;
enable = cumul + 8;
if fpcr<enable> == '1' then
// Trapping of the exception enabled.
// It is IMPLEMENTATION DEFINED whether the enable bit may be set at all, and
// if so then how exceptions may be accumulated before calling FPTrappedException()
IMPLEMENTATION_DEFINED "floating-point trap handling";
elsiffpcr, boolean fpexc)
assert N IN {16,32,64};
boolean op1_nan = type1 IN { FPType_SNaN, FPType_QNaN};
boolean op2_nan = type2 IN {FPType_SNaN, FPType_QNaN};
boolean op3_nan = type3 IN {FPType_SNaN, FPType_QNaN};
boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
if altfp then
if type1 == FPType_SNaN || type2 == FPType_SNaN || type3 == FPType_SNaN then
type_nan = FPType_SNaN;
else
type_nan = FPType_QNaN;
if altfp && op1_nan && op2_nan && op3_nan then
done = TRUE; result = FPProcessNaN(type_nan, op2, fpcr, fpexc); // <n> register NaN selected
elsif altfp && op2_nan && (op1_nan || op3_nan) then
done = TRUE; result = FPProcessNaN(type_nan, op2, fpcr, fpexc); // <n> register NaN selected
elsif altfp && op3_nan && op1_nan then
done = TRUE; result = FPProcessNaN(type_nan, op3, fpcr, fpexc); // <m> register NaN selected
elsif type1 == FPType_SNaN then
done = TRUE; result = FPProcessNaN(type1, op1, fpcr, fpexc);
elsif type2 == FPType_SNaN then
done = TRUE; result = FPProcessNaN(type2, op2, fpcr, fpexc);
elsif type3 == FPType_SNaN then
done = TRUE; result = FPProcessNaN(type3, op3, fpcr, fpexc);
elsif type1 == FPType_QNaN then
done = TRUE; result = FPProcessNaN(type1, op1, fpcr, fpexc);
elsif type2 == FPType_QNaN then
done = TRUE; result = FPProcessNaN(type2, op2, fpcr, fpexc);
elsif type3 == FPType_QNaN then
done = TRUE; result = FPProcessNaN(type3, op3, fpcr, fpexc);
else
done = FALSE; result = Zeros() then
// Set the cumulative exception bit
FPSCR<cumul> = '1';
else
// Set the cumulative exception bit
FPSR<cumul> = '1';
return;(); // 'Don't care' result
return (done, result);
// FPProcessNaN()
// ==============
(bits(N)) FPProcessNaN(FPType fptype, bits(N) op, FPCRType fpcr)
    boolean fpexc = TRUE;    // Generate floating-point exceptions
    return FPProcessNaN(fptype, op, fpcr, fpexc);

// FPProcessNaN()
// ==============
// Handle NaN input operands, returning the operand or default NaN value
// if fpcr.DN is selected. The 'fpcr' argument supplies the FPCR control bits.
// The 'fpexc' argument controls the generation of exceptions, regardless of
// whether 'fptype' is a signalling NaN or a quiet NaN.
bits(N) FPProcessNaN(FPType fptype, bits(N) op, FPCRType fpcr, boolean fpexc)
    assert N IN {16,32,64};
    assert fptype IN {FPType_QNaN, FPType_SNaN};
    case N of
        when 16 topfrac =  9;
        when 32 topfrac = 22;
        when 64 topfrac = 51;
    result = op;
    if fptype == FPType_SNaN then
        // Quieten a signalling NaN and raise Invalid Operation if enabled.
        result<topfrac> = '1';
        if fpexc then FPProcessException(FPExc_InvalidOp, fpcr);
    if fpcr.DN == '1' then  // DefaultNaN requested
        result = FPDefaultNaN();
    return result;

// FPRecipEstimate()
// =================
bits(N) FPRecipEstimate(bits(N) operand, FPCRType fpcr)
    assert N IN {16,32,64};
    // When using alternative floating-point behaviour, do not generate
    // floating-point exceptions, flush denormal input and output to zero,
    // and use RNE rounding mode.
    boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    boolean fpexc = !altfp;
    if altfp then fpcr.<FIZ,FZ> = '11';
    if altfp then fpcr.RMode    = '00';
    (fptype,sign,value) = FPUnpack(operand, fpcr, fpexc);
    FPRounding rounding = FPRoundingMode(fpcr);
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, operand, fpcr, fpexc);
    elsif fptype == FPType_Infinity then
        result = FPZero(sign);
    elsif fptype == FPType_Zero then
        result = FPInfinity(sign);
        if fpexc then FPProcessException(FPExc_DivideByZero, fpcr);
    elsif (
           (N == 16 && Abs(value) < 2.0^-16)  ||
           (N == 32 && Abs(value) < 2.0^-128) ||
           (N == 64 && Abs(value) < 2.0^-1024)
          ) then
        // Result overflows; direction of the overflow depends on the rounding mode.
        case rounding of
            when FPRounding_TIEEVEN
                overflow_to_inf = TRUE;
            when FPRounding_POSINF
                overflow_to_inf = (sign == '0');
            when FPRounding_NEGINF
                overflow_to_inf = (sign == '1');
            when FPRounding_ZERO
                overflow_to_inf = FALSE;
        result = if overflow_to_inf then FPInfinity(sign) else FPMaxNormal(sign);
        if fpexc then
            FPProcessException(FPExc_Overflow, fpcr);
            FPProcessException(FPExc_Inexact, fpcr);
    elsif ((fpcr.FZ == '1' && N != 16) || (fpcr.FZ16 == '1' && N == 16))
          && (
              (N == 16 && Abs(value) >= 2.0^14)  ||
              (N == 32 && Abs(value) >= 2.0^126) ||
              (N == 64 && Abs(value) >= 2.0^1022)
             ) then
        // Result flushed to zero of correct sign
        result = FPZero(sign);
        // Flush-to-zero never generates a trapped exception.
        if UsingAArch32() then
            FPSCR.UFC = '1';
        else
            if fpexc then FPSR.UFC = '1';
    else
        // Scale to a fixed point value in the range 0.5 <= x < 1.0 in steps of 1/512, and
        // calculate result exponent. Scaled value has copied sign bit,
        // exponent = 1022 = double-precision biased version of -1,
        // fraction = original fraction
        case N of
            when 16
                fraction = operand<9:0> : Zeros(42);
                exp = UInt(operand<14:10>);
            when 32
                fraction = operand<22:0> : Zeros(29);
                exp = UInt(operand<30:23>);
            when 64
                fraction = operand<51:0>;
                exp = UInt(operand<62:52>);
        if exp == 0 then
            if fraction<51> == '0' then
                exp = -1;
                fraction = fraction<49:0>:'00';
            else
                fraction = fraction<50:0>:'0';
        integer scaled;
        boolean increasedprecision = N==32 && HaveFeatRPRES() && altfp;
        if !increasedprecision then
            scaled = UInt('1':fraction<51:44>);
        else
            scaled = UInt('1':fraction<51:41>);
        case N of
            when 16 result_exp =   29 - exp; // In range 29-30 = -1 to 29+1 = 30
            when 32 result_exp =  253 - exp; // In range 253-254 = -1 to 253+1 = 254
            when 64 result_exp = 2045 - exp; // In range 2045-2046 = -1 to 2045+1 = 2046
        // Scaled is in range 256 .. 511 or 2048 .. 4095 range representing a
        // fixed-point number in range [0.5 .. 1.0].
        estimate = RecipEstimate(scaled, increasedprecision);
        // Estimate is in the range 256 .. 511 or 4096 .. 8191 representing a
        // fixed-point result in the range [1.0 .. 2.0].
        // Convert to scaled floating point result with copied sign bit,
        // high-order bits from estimate, and exponent calculated above.
        if !increasedprecision then
            fraction = estimate<7:0> : Zeros(44);
        else
            fraction = estimate<11:0> : Zeros(40);
        if result_exp == 0 then
            fraction = '1' : fraction<51:1>;
        elsif result_exp == -1 then
            fraction = '01' : fraction<51:2>;
            result_exp = 0;
        case N of
            when 16 result = sign : result_exp<N-12:0> : fraction<51:42>;
            when 32 result = sign : result_exp<N-25:0> : fraction<51:29>;
            when 64 result = sign : result_exp<N-54:0> : fraction<51:0>;
    return result;
// RecipEstimate()
// ===============
// Compute estimate of reciprocal of 9-bit fixed-point number.
//
// a is in range 256 .. 511 or 2048 .. 4096 representing a number in
// the range 0.5 <= x < 1.0.
// increasedprecision determines if the mantissa is 8-bit or 12-bit.
// result is in the range 256 .. 511 or 4096 .. 8191 representing a
// number in the range 1.0 to 511/256 or 1.00 to 8191/4096.
integer RecipEstimate(integer a, boolean increasedprecision)
    integer r;
    if !increasedprecision then
        assert 256 <= a && a < 512;
        a = a*2+1;                   // Round to nearest
        integer b = (2 ^ 19) DIV a;
        r = (b+1) DIV 2;             // Round to nearest
        assert 256 <= r && r < 512;
    else
        assert 2048 <= a && a < 4096;
        a = a*2+1;                   // Round to nearest
        real real_val = Real(2^25)/Real(a);
        r = RoundDown(real_val);
        real error = real_val - Real(r);
        boolean round_up = error > 0.5;  // Error cannot be exactly 0.5 so do not need tie case
        if round_up then r = r+1;
        assert 4096 <= r && r < 8192;
    return r;

// FPProcessNaNs()
// ===============
(boolean, bits(N)) FPProcessNaNs(FPType type1, FPType type2, bits(N) op1,
                                 bits(N) op2, FPCRType fpcr)
    boolean altfmaxfmin = FALSE;    // Do not use altfp mode for FMIN, FMAX and variants
    boolean fpexc = TRUE;           // Generate floating-point exceptions
    return FPProcessNaNs(type1, type2, op1, op2, fpcr, altfmaxfmin, fpexc);
// FPProcessNaNs()
// ===============
//
// The boolean part of the return value says whether a NaN has been found and
// processed. The bits(N) part is only relevant if it has and supplies the
// result of the operation.
//
// The 'fpcr' argument supplies FPCR control bits and 'altfmaxfmin' controls
// alternative floating-point behaviour for FMAX, FMIN and variants. 'fpexc'
// controls the generation of floating-point exceptions. Status information
// is updated directly in the FPSR where appropriate.
(boolean, bits(N)) FPProcessNaNs(FPType type1, FPType type2, bits(N) op1, bits(N) op2,
                                 FPCRType fpcr, boolean altfmaxfmin, boolean fpexc)
    assert N IN {16,32,64};
    boolean altfp    = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    boolean op1_nan  = type1 IN {FPType_SNaN, FPType_QNaN};
    boolean op2_nan  = type2 IN {FPType_SNaN, FPType_QNaN};
    boolean any_snan = type1 == FPType_SNaN || type2 == FPType_SNaN;
    FPType  type_nan = if any_snan then FPType_SNaN else FPType_QNaN;

    if altfmaxfmin && (op1_nan || op2_nan) then
        FPProcessException(FPExc_InvalidOp, fpcr);
        done = TRUE;  sign2 = op2<N-1>;
        result = if type2 == FPType_Zero then FPZero(sign2) else op2;
    elsif altfp && op1_nan && op2_nan then
        done = TRUE;  result = FPProcessNaN(type_nan, op1, fpcr, fpexc); // <n> register NaN selected
    elsif type1 == FPType_SNaN then
        done = TRUE;  result = FPProcessNaN(type1, op1, fpcr, fpexc);
    elsif type2 == FPType_SNaN then
        done = TRUE;  result = FPProcessNaN(type2, op2, fpcr, fpexc);
    elsif type1 == FPType_QNaN then
        done = TRUE;  result = FPProcessNaN(type1, op1, fpcr, fpexc);
    elsif type2 == FPType_QNaN then
        done = TRUE;  result = FPProcessNaN(type2, op2, fpcr, fpexc);
    else
        done = FALSE; result = Zeros();  // 'Don't care' result
    return (done, result);
// FPProcessNaNs3()
// ================
(boolean, bits(N)) FPProcessNaNs3(FPType type1, FPType type2, FPType type3,
                                  bits(N) op1, bits(N) op2, bits(N) op3,
                                  FPCRType fpcr)
    boolean fpexc = TRUE;    // Generate floating-point exceptions
    return FPProcessNaNs3(type1, type2, type3, op1, op2, op3, fpcr, fpexc);

// FPProcessNaNs3()
// ================
// The boolean part of the return value says whether a NaN has been found and
// processed. The bits(N) part is only relevant if it has and supplies the
// result of the operation.
//
// The 'fpcr' argument supplies FPCR control bits and 'fpexc' controls the
// generation of floating-point exceptions. Status information is updated
// directly in the FPSR where appropriate.
(boolean, bits(N)) FPProcessNaNs3(FPType type1, FPType type2, FPType type3,
                                  bits(N) op1, bits(N) op2, bits(N) op3,
                                  FPCRType fpcr, boolean fpexc)
    assert N IN {16,32,64};
    boolean op1_nan = type1 IN {FPType_SNaN, FPType_QNaN};
    boolean op2_nan = type2 IN {FPType_SNaN, FPType_QNaN};
    boolean op3_nan = type3 IN {FPType_SNaN, FPType_QNaN};
    boolean altfp   = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    if altfp then
        if type1 == FPType_SNaN || type2 == FPType_SNaN || type3 == FPType_SNaN then
            type_nan = FPType_SNaN;
        else
            type_nan = FPType_QNaN;
    if altfp && op1_nan && op2_nan && op3_nan then
        done = TRUE;  result = FPProcessNaN(type_nan, op2, fpcr, fpexc); // <n> register NaN selected
    elsif altfp && op2_nan && (op1_nan || op3_nan) then
        done = TRUE;  result = FPProcessNaN(type_nan, op2, fpcr, fpexc); // <n> register NaN selected
    elsif altfp && op3_nan && op1_nan then
        done = TRUE;  result = FPProcessNaN(type_nan, op3, fpcr, fpexc); // <m> register NaN selected
    elsif type1 == FPType_SNaN then
        done = TRUE;  result = FPProcessNaN(type1, op1, fpcr, fpexc);
    elsif type2 == FPType_SNaN then
        done = TRUE;  result = FPProcessNaN(type2, op2, fpcr, fpexc);
    elsif type3 == FPType_SNaN then
        done = TRUE;  result = FPProcessNaN(type3, op3, fpcr, fpexc);
    elsif type1 == FPType_QNaN then
        done = TRUE;  result = FPProcessNaN(type1, op1, fpcr, fpexc);
    elsif type2 == FPType_QNaN then
        done = TRUE;  result = FPProcessNaN(type2, op2, fpcr, fpexc);
    elsif type3 == FPType_QNaN then
        done = TRUE;  result = FPProcessNaN(type3, op3, fpcr, fpexc);
    else
        done = FALSE; result = Zeros();  // 'Don't care' result
    return (done, result);

// FPRecpX()
// =========
bits(N) FPRecpX(bits(N) op, FPCRType fpcr)
    assert N IN {16,32,64};
    case N of
        when 16 esize = 5;
        when 32 esize = 8;
        when 64 esize = 11;
    bits(N) result;
    bits(esize) exp;
    bits(esize) max_exp;
    bits(N-(esize+1)) frac = Zeros();
    // NOTE(review): altfp condition reconstructed from diff residue; confirm
    // against the published pseudocode.
    boolean altfp = HaveAltFP() && fpcr.AH == '1';
    boolean fpexc = !altfp;              // Generate no floating-point exceptions
    if altfp then fpcr.<FIZ,FZ> = '11';  // Flush denormal input and output to zero
    (fptype,sign,value) = FPUnpack(op, fpcr, fpexc);
    case N of
        when 16 exp = op<10+esize-1:10>;
        when 32 exp = op<23+esize-1:23>;
        when 64 exp = op<52+esize-1:52>;
    max_exp = Ones(esize) - 1;
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, op, fpcr, fpexc);
    else
        if IsZero(exp) then  // Zero and denormals
            result = sign:max_exp:frac;
        else                 // Infinities and normals
            result = sign:NOT(exp):frac;
    return result;
// FPRound()
// =========
// Used by data processing and int/fixed <-> FP conversion instructions.
// For half-precision data it ignores AHP, and observes FZ16.
bits(N) FPRound(real op, FPCRType fpcr, FPRounding rounding)
    fpcr.AHP = '0';
    boolean fpexc = TRUE;    // Generate floating-point exceptions
    boolean isbfloat16 = FALSE;
    return FPRoundBase(op, fpcr, rounding, isbfloat16, fpexc);

// FPRound()
// =========
// Used by data processing and int/fixed <-> FP conversion instructions.
// For half-precision data it ignores AHP, and observes FZ16.
//
// The 'fpcr' argument supplies FPCR control bits and 'fpexc' controls the
// generation of floating-point exceptions. Status information is updated
// directly in the FPSR where appropriate.
bits(N) FPRound(real op, FPCRType fpcr, FPRounding rounding, boolean fpexc)
    fpcr.AHP = '0';
    boolean isbfloat16 = FALSE;
    return FPRoundBase(op, fpcr, rounding, isbfloat16, fpexc);

// FPRound()
// =========
bits(N) FPRound(real op, FPCRType fpcr)
    return FPRound(op, fpcr, FPRoundingMode(fpcr));

// FPRecipEstimate()
// =================
bits(N) FPRecipEstimate(bits(N) operand, FPCRType fpcr)
    assert N IN {16,32,64};
    // When using alternative floating-point behaviour, do not generate
    // floating-point exceptions, flush denormal input and output to zero,
    // and use RNE rounding mode.
    boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    boolean fpexc = !altfp;
    if altfp then fpcr.<FIZ,FZ> = '11';
    if altfp then fpcr.RMode    = '00';
    (fptype,sign,value) = FPUnpack(operand, fpcr, fpexc);
    FPRounding rounding = FPRoundingMode(fpcr);
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, operand, fpcr, fpexc);
    elsif fptype == FPType_Infinity then
        result = FPZero(sign);
    elsif fptype == FPType_Zero then
        result = FPInfinity(sign);
        if fpexc then FPProcessException(FPExc_DivideByZero, fpcr);
    elsif (
           (N == 16 && Abs(value) < 2.0^-16)  ||
           (N == 32 && Abs(value) < 2.0^-128) ||
           (N == 64 && Abs(value) < 2.0^-1024)
          ) then
        case rounding of
            when FPRounding_TIEEVEN
                overflow_to_inf = TRUE;
            when FPRounding_POSINF
                overflow_to_inf = (sign == '0');
            when FPRounding_NEGINF
                overflow_to_inf = (sign == '1');
            when FPRounding_ZERO
                overflow_to_inf = FALSE;
        result = if overflow_to_inf then FPInfinity(sign) else FPMaxNormal(sign);
        if fpexc then
            FPProcessException(FPExc_Overflow, fpcr);
            FPProcessException(FPExc_Inexact, fpcr);
    elsif ((fpcr.FZ == '1' && N != 16) || (fpcr.FZ16 == '1' && N == 16))
          && (
              (N == 16 && Abs(value) >= 2.0^14)  ||
              (N == 32 && Abs(value) >= 2.0^126) ||
              (N == 64 && Abs(value) >= 2.0^1022)
             ) then
        // Result flushed to zero of correct sign
        result = FPZero(sign);
        // Flush-to-zero never generates a trapped exception.
        if UsingAArch32() then
            FPSCR.UFC = '1';
        else
            if fpexc then FPSR.UFC = '1';
    else
        // Scale to a fixed point value in the range 0.5 <= x < 1.0 in steps of 1/512, and
        // calculate result exponent. Scaled value has copied sign bit,
        // exponent = 1022 = double-precision biased version of -1,
        // fraction = original fraction
        case N of
            when 16
                fraction = operand<9:0> : Zeros(42);
                exp = UInt(operand<14:10>);
            when 32
                fraction = operand<22:0> : Zeros(29);
                exp = UInt(operand<30:23>);
            when 64
                fraction = operand<51:0>;
                exp = UInt(operand<62:52>);
        if exp == 0 then
            if fraction<51> == '0' then
                exp = -1;
                fraction = fraction<49:0>:'00';
            else
                fraction = fraction<50:0>:'0';
        integer scaled;
        boolean increasedprecision = N==32 && HaveFeatRPRES() && altfp;
        if !increasedprecision then
            scaled = UInt('1':fraction<51:44>);
        else
            scaled = UInt('1':fraction<51:41>);
        case N of
            when 16 result_exp =   29 - exp; // In range 29-30 = -1 to 29+1 = 30
            when 32 result_exp =  253 - exp; // In range 253-254 = -1 to 253+1 = 254
            when 64 result_exp = 2045 - exp; // In range 2045-2046 = -1 to 2045+1 = 2046
        // Scaled is in range 256 .. 511 or 2048 .. 4095 range representing a
        // fixed-point number in range [0.5 .. 1.0].
        estimate = RecipEstimate(scaled, increasedprecision);
        // Estimate is in the range 256 .. 511 or 4096 .. 8191 representing a
        // fixed-point result in the range [1.0 .. 2.0].
        // Convert to scaled floating point result with copied sign bit,
        // high-order bits from estimate, and exponent calculated above.
        if !increasedprecision then
            fraction = estimate<7:0> : Zeros(44);
        else
            fraction = estimate<11:0> : Zeros(40);
        if result_exp == 0 then
            fraction = '1' : fraction<51:1>;
        elsif result_exp == -1 then
            fraction = '01' : fraction<51:2>;
            result_exp = 0;
        case N of
            when 16 result = sign : result_exp<N-12:0> : fraction<51:42>;
            when 32 result = sign : result_exp<N-25:0> : fraction<51:29>;
            when 64 result = sign : result_exp<N-54:0> : fraction<51:0>;
    return result;
// RecipEstimate()
// ===============
// Compute estimate of reciprocal of 9-bit fixed-point number.
//
// a is in range 256 .. 511 or 2048 .. 4096 representing a number in
// the range 0.5 <= x < 1.0.
// increasedprecision determines if the mantissa is 8-bit or 12-bit.
// result is in the range 256 .. 511 or 4096 .. 8191 representing a
// number in the range 1.0 to 511/256 or 1.00 to 8191/4096.
integer RecipEstimate(integer a, boolean increasedprecision)
    integer r;
    if !increasedprecision then
        assert 256 <= a && a < 512;
        a = a*2+1;                   // Round to nearest
        integer b = (2 ^ 19) DIV a;
        r = (b+1) DIV 2;             // Round to nearest
        assert 256 <= r && r < 512;
    else
        assert 2048 <= a && a < 4096;
        a = a*2+1;                   // Round to nearest
        real real_val = Real(2^25)/Real(a);
        r = RoundDown(real_val);
        real error = real_val - Real(r);
        boolean round_up = error > 0.5;  // Error cannot be exactly 0.5 so do not need tie case
        if round_up then r = r+1;
        assert 4096 <= r && r < 8192;
    return r;

// FPRoundBase()
// =============
bits(N) FPRoundBase(real op, FPCRType fpcr, FPRounding rounding, boolean isbfloat16)
    boolean fpexc = TRUE;    // Generate floating-point exceptions
    return FPRoundBase(op, fpcr, rounding, isbfloat16, fpexc);
// FPRoundBase()
// =============
// Convert a real number OP into an N-bit floating-point value using the
// supplied rounding mode RMODE.
//
// The 'fpcr' argument supplies FPCR control bits and 'fpexc' controls the
// generation of floating-point exceptions. Status information is updated
// directly in the FPSR where appropriate.
bits(N) FPRoundBase(real op, FPCRType fpcr, FPRounding rounding,
                    boolean isbfloat16, boolean fpexc)
    assert N IN {16,32,64};
    assert op != 0.0;
    assert rounding != FPRounding_TIEAWAY;
    bits(N) result;
    // Obtain format parameters - minimum exponent, numbers of exponent and fraction bits.
    if N == 16 then
        minimum_exp = -14;    E = 5;  F = 10;
    elsif N == 32 && isbfloat16 then
        minimum_exp = -126;   E = 8;  F = 7;
    elsif N == 32 then
        minimum_exp = -126;   E = 8;  F = 23;
    else  // N == 64
        minimum_exp = -1022;  E = 11; F = 52;
    // Split value into sign, unrounded mantissa and exponent.
    if op < 0.0 then
        sign = '1'; mantissa = -op;
    else
        sign = '0'; mantissa = op;
    exponent = 0;
    while mantissa < 1.0 do
        mantissa = mantissa * 2.0; exponent = exponent - 1;
    while mantissa >= 2.0 do
        mantissa = mantissa / 2.0; exponent = exponent + 1;
    // When TRUE, detection of underflow occurs after rounding and the test for a
    // denormalized number for single and double precision values occurs after rounding.
    altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    // Deal with flush-to-zero before rounding if FPCR.AH != '1'.
    if (!altfp && ((fpcr.FZ == '1' && N != 16) || (fpcr.FZ16 == '1' && N == 16)) &&
          exponent < minimum_exp) then
        // Flush-to-zero never generates a trapped exception.
        if UsingAArch32() then
            FPSCR.UFC = '1';
        else
            FPSR.UFC = '1';
        return FPZero(sign);
    biased_exp_unconstrained = exponent - minimum_exp + 1;
    int_mant_unconstrained = RoundDown(mantissa * 2.0^F);
    error_unconstrained = mantissa * 2.0^F - Real(int_mant_unconstrained);
    // Start creating the exponent value for the result. Start by biasing the actual exponent
    // so that the minimum exponent becomes 1, lower values 0 (indicating possible underflow).
    biased_exp = Max(exponent - minimum_exp + 1, 0);
    if biased_exp == 0 then mantissa = mantissa / 2.0^(minimum_exp - exponent);
    // Get the unrounded mantissa as an integer, and the "units in last place" rounding error.
    int_mant = RoundDown(mantissa * 2.0^F);  // < 2.0^F if biased_exp == 0, >= 2.0^F if not
    error = mantissa * 2.0^F - Real(int_mant);
    // Underflow occurs if exponent is too small before rounding, and result is inexact or
    // the Underflow exception is trapped. This applies before rounding if FPCR.AH != '1'.
    if !altfp && biased_exp == 0 && (error != 0.0 || fpcr.UFE == '1') then
        if fpexc then FPProcessException(FPExc_Underflow, fpcr);
    // Round result according to rounding mode.
    if altfp then
        case rounding of
            when FPRounding_TIEEVEN
                round_up_unconstrained = (error_unconstrained > 0.5 ||
                              (error_unconstrained == 0.5 && int_mant_unconstrained<0> == '1'));
                round_up = (error > 0.5 || (error == 0.5 && int_mant<0> == '1'));
                overflow_to_inf = TRUE;
            when FPRounding_POSINF
                round_up_unconstrained = (error_unconstrained != 0.0 && sign == '0');
                round_up = (error != 0.0 && sign == '0');
                overflow_to_inf = (sign == '0');
            when FPRounding_NEGINF
                round_up_unconstrained = (error_unconstrained != 0.0 && sign == '1');
                round_up = (error != 0.0 && sign == '1');
                overflow_to_inf = (sign == '1');
            when FPRounding_ZERO, FPRounding_ODD
                round_up_unconstrained = FALSE;
                round_up = FALSE;
                overflow_to_inf = FALSE;
        if round_up_unconstrained then
            int_mant_unconstrained = int_mant_unconstrained + 1;
            if int_mant_unconstrained == 2^(F+1) then  // Rounded up to next exponent
                biased_exp_unconstrained = biased_exp_unconstrained + 1;
                int_mant_unconstrained = int_mant_unconstrained DIV 2;
        // Deal with flush-to-zero and underflow after rounding if FPCR.AH == '1'.
        if biased_exp_unconstrained < 1 && int_mant_unconstrained != 0 then
            // the result of unconstrained rounding is less than the minimum normalized number
            if (fpcr.FZ == '1' && N != 16) || (fpcr.FZ16 == '1' && N == 16) then // Flush-to-zero
                if fpexc then
                    FPSR.UFC = '1';
                    FPProcessException(FPExc_Inexact, fpcr);
                return FPZero(sign);
            elsif error != 0.0 || fpcr.UFE == '1' then
                if fpexc then FPProcessException(FPExc_Underflow, fpcr);
    else  // altfp == FALSE
        case rounding of
            when FPRounding_TIEEVEN
                round_up = (error > 0.5 || (error == 0.5 && int_mant<0> == '1'));
                overflow_to_inf = TRUE;
            when FPRounding_POSINF
                round_up = (error != 0.0 && sign == '0');
                overflow_to_inf = (sign == '0');
            when FPRounding_NEGINF
                round_up = (error != 0.0 && sign == '1');
                overflow_to_inf = (sign == '1');
            when FPRounding_ZERO, FPRounding_ODD
                round_up = FALSE;
                overflow_to_inf = FALSE;
    if round_up then
        int_mant = int_mant + 1;
        if int_mant == 2^F then       // Rounded up from denormalized to normalized
            biased_exp = 1;
        if int_mant == 2^(F+1) then   // Rounded up to next exponent
            biased_exp = biased_exp + 1;
            int_mant = int_mant DIV 2;
    // Handle rounding to odd
    if error != 0.0 && rounding == FPRounding_ODD then
        int_mant<0> = '1';
    // Deal with overflow and generate result.
    if N != 16 || fpcr.AHP == '0' then  // Single, double or IEEE half precision
        if biased_exp >= 2^E - 1 then
            result = if overflow_to_inf then FPInfinity(sign) else FPMaxNormal(sign);
            if fpexc then FPProcessException(FPExc_Overflow, fpcr);
            error = 1.0;  // Ensure that an Inexact exception occurs
        else
            result = sign : biased_exp<E-1:0> : int_mant<F-1:0> : Zeros(N-(E+F+1));
    else  // Alternative half precision
        if biased_exp >= 2^E then
            result = sign : Ones(N-1);
            if fpexc then FPProcessException(FPExc_InvalidOp, fpcr);
            error = 0.0;  // Ensure that an Inexact exception does not occur
        else
            result = sign : biased_exp<E-1:0> : int_mant<F-1:0> : Zeros(N-(E+F+1));
    // Deal with Inexact exception.
    if error != 0.0 then
        if fpexc then FPProcessException(FPExc_Inexact, fpcr);
    return result;
// FPRoundCV()
// ===========
// Used for FP <-> FP conversion instructions.
// For half-precision data ignores FZ16 and observes AHP.
bits(N) FPRoundCV(real op, FPCRType fpcr, FPRounding rounding)
    fpcr.FZ16 = '0';
    boolean fpexc = TRUE;    // Generate floating-point exceptions
    boolean isbfloat16 = FALSE;
    return FPRoundBase(op, fpcr, rounding, isbfloat16, fpexc);

// FPRecpX()
// =========
bits(N) FPRecpX(bits(N) op, FPCRType fpcr)
    assert N IN {16,32,64};
    case N of
        when 16 esize = 5;
        when 32 esize = 8;
        when 64 esize = 11;
    bits(N) result;
    bits(esize) exp;
    bits(esize) max_exp;
    bits(N-(esize+1)) frac = Zeros();
    // NOTE(review): altfp condition reconstructed from diff residue; confirm
    // against the published pseudocode.
    boolean altfp = HaveAltFP() && fpcr.AH == '1';
    boolean fpexc = !altfp;              // Generate no floating-point exceptions
    if altfp then fpcr.<FIZ,FZ> = '11';  // Flush denormal input and output to zero
    (fptype,sign,value) = FPUnpack(op, fpcr, fpexc);
    case N of
        when 16 exp = op<10+esize-1:10>;
        when 32 exp = op<23+esize-1:23>;
        when 64 exp = op<52+esize-1:52>;
    max_exp = Ones(esize) - 1;
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, op, fpcr, fpexc);
    else
        if IsZero(exp) then  // Zero and denormals
            result = sign:max_exp:frac;
        else                 // Infinities and normals
            result = sign:NOT(exp):frac;
    return result;
// FPRound()
// =========
// Used by data processing and int/fixed <-> FP conversion instructions.
// For half-precision data it ignores AHP, and observes FZ16.
bits(N) FPRound(real op, FPCRType fpcr, FPRounding rounding)
    fpcr.AHP = '0';
    boolean fpexc = TRUE;    // Generate floating-point exceptions
    boolean isbfloat16 = FALSE;
    return FPRoundBase(op, fpcr, rounding, isbfloat16, fpexc);

// FPRound()
// =========
// Used by data processing and int/fixed <-> FP conversion instructions.
// For half-precision data it ignores AHP, and observes FZ16.
//
// The 'fpcr' argument supplies FPCR control bits and 'fpexc' controls the
// generation of floating-point exceptions. Status information is updated
// directly in the FPSR where appropriate.
bits(N) FPRound(real op, FPCRType fpcr, FPRounding rounding, boolean fpexc)
    fpcr.AHP = '0';
    boolean isbfloat16 = FALSE;
    return FPRoundBase(op, fpcr, rounding, isbfloat16, fpexc);

// FPRound()
// =========
bits(N) FPRound(real op, FPCRType fpcr)
    return FPRound(op, fpcr, FPRoundingMode(fpcr));

// FPRounding
// ==========
// The available floating-point rounding modes.
enumeration FPRounding  {FPRounding_TIEEVEN, FPRounding_POSINF,
                         FPRounding_NEGINF,  FPRounding_ZERO,
                         FPRounding_TIEAWAY, FPRounding_ODD};
// FPRoundingMode()
// ================
// Return the current floating-point rounding mode.
FPRounding FPRoundingMode(FPCRType fpcr)
    return FPDecodeRounding(fpcr.RMode);

// FPRoundBase()
// =============
bits(N) FPRoundBase(real op, FPCRType fpcr, FPRounding rounding, boolean isbfloat16)
    boolean fpexc = TRUE;    // Generate floating-point exceptions
    return FPRoundBase(op, fpcr, rounding, isbfloat16, fpexc);
// FPRoundBase()
// =============
// Convert a real number OP into an N-bit floating-point value using the
// supplied rounding mode RMODE.
//
// The 'fpcr' argument supplies FPCR control bits and 'fpexc' controls the
// generation of floating-point exceptions. Status information is updated
// directly in the FPSR where appropriate.
bits(N) FPRoundBase(real op, FPCRType fpcr, FPRounding rounding,
                    boolean isbfloat16, boolean fpexc)
    assert N IN {16,32,64};
    assert op != 0.0;
    assert rounding != FPRounding_TIEAWAY;
    bits(N) result;
    // Obtain format parameters - minimum exponent, numbers of exponent and fraction bits.
    if N == 16 then
        minimum_exp = -14;    E = 5;  F = 10;
    elsif N == 32 && isbfloat16 then
        minimum_exp = -126;   E = 8;  F = 7;
    elsif N == 32 then
        minimum_exp = -126;   E = 8;  F = 23;
    else  // N == 64
        minimum_exp = -1022;  E = 11; F = 52;
    // Split value into sign, unrounded mantissa and exponent.
    if op < 0.0 then
        sign = '1'; mantissa = -op;
    else
        sign = '0'; mantissa = op;
    exponent = 0;
    while mantissa < 1.0 do
        mantissa = mantissa * 2.0; exponent = exponent - 1;
    while mantissa >= 2.0 do
        mantissa = mantissa / 2.0; exponent = exponent + 1;
    // When TRUE, detection of underflow occurs after rounding and the test for a
    // denormalized number for single and double precision values occurs after rounding.
    altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    // Deal with flush-to-zero before rounding if FPCR.AH != '1'.
    if (!altfp && ((fpcr.FZ == '1' && N != 16) || (fpcr.FZ16 == '1' && N == 16)) &&
          exponent < minimum_exp) then
        // Flush-to-zero never generates a trapped exception.
        if UsingAArch32() then
            FPSCR.UFC = '1';
        else
            FPSR.UFC = '1';
        return FPZero(sign);
    biased_exp_unconstrained = exponent - minimum_exp + 1;
    int_mant_unconstrained = RoundDown(mantissa * 2.0^F);
    error_unconstrained = mantissa * 2.0^F - Real(int_mant_unconstrained);
    // Start creating the exponent value for the result. Start by biasing the actual exponent
    // so that the minimum exponent becomes 1, lower values 0 (indicating possible underflow).
    biased_exp = Max(exponent - minimum_exp + 1, 0);
    if biased_exp == 0 then mantissa = mantissa / 2.0^(minimum_exp - exponent);
    // Get the unrounded mantissa as an integer, and the "units in last place" rounding error.
    int_mant = RoundDown(mantissa * 2.0^F);  // < 2.0^F if biased_exp == 0, >= 2.0^F if not
    error = mantissa * 2.0^F - Real(int_mant);
    // Underflow occurs if exponent is too small before rounding, and result is inexact or
    // the Underflow exception is trapped. This applies before rounding if FPCR.AH != '1'.
    if !altfp && biased_exp == 0 && (error != 0.0 || fpcr.UFE == '1') then
        if fpexc then FPProcessException(FPExc_Underflow, fpcr);
    // Round result according to rounding mode.
    if altfp then
        case rounding of
            when FPRounding_TIEEVEN
                round_up_unconstrained = (error_unconstrained > 0.5 ||
                              (error_unconstrained == 0.5 && int_mant_unconstrained<0> == '1'));
                round_up = (error > 0.5 || (error == 0.5 && int_mant<0> == '1'));
                overflow_to_inf = TRUE;
            when FPRounding_POSINF
                round_up_unconstrained = (error_unconstrained != 0.0 && sign == '0');
                round_up = (error != 0.0 && sign == '0');
                overflow_to_inf = (sign == '0');
            when FPRounding_NEGINF
                round_up_unconstrained = (error_unconstrained != 0.0 && sign == '1');
                round_up = (error != 0.0 && sign == '1');
                overflow_to_inf = (sign == '1');
            when FPRounding_ZERO, FPRounding_ODD
                round_up_unconstrained = FALSE;
                round_up = FALSE;
                overflow_to_inf = FALSE;
        if round_up_unconstrained then
            int_mant_unconstrained = int_mant_unconstrained + 1;
            if int_mant_unconstrained == 2^(F+1) then  // Rounded up to next exponent
                biased_exp_unconstrained = biased_exp_unconstrained + 1;
                int_mant_unconstrained = int_mant_unconstrained DIV 2;
        // Deal with flush-to-zero and underflow after rounding if FPCR.AH == '1'.
        if biased_exp_unconstrained < 1 && int_mant_unconstrained != 0 then
            // the result of unconstrained rounding is less than the minimum normalized number
            if (fpcr.FZ == '1' && N != 16) || (fpcr.FZ16 == '1' && N == 16) then // Flush-to-zero
                if fpexc then
                    FPSR.UFC = '1';
                    FPProcessException(FPExc_Inexact, fpcr);
                return FPZero(sign);
            elsif error != 0.0 || fpcr.UFE == '1' then
                if fpexc then FPProcessException(FPExc_Underflow, fpcr);
    else  // altfp == FALSE
        case rounding of
            when FPRounding_TIEEVEN
                round_up = (error > 0.5 || (error == 0.5 && int_mant<0> == '1'));
                overflow_to_inf = TRUE;
            when FPRounding_POSINF
                round_up = (error != 0.0 && sign == '0');
                overflow_to_inf = (sign == '0');
            when FPRounding_NEGINF
                round_up = (error != 0.0 && sign == '1');
                overflow_to_inf = (sign == '1');
            when FPRounding_ZERO, FPRounding_ODD
                round_up = FALSE;
                overflow_to_inf = FALSE;
    if round_up then
        int_mant = int_mant + 1;
        if int_mant == 2^F then       // Rounded up from denormalized to normalized
            biased_exp = 1;
        if int_mant == 2^(F+1) then   // Rounded up to next exponent
            biased_exp = biased_exp + 1;
            int_mant = int_mant DIV 2;
    // Handle rounding to odd
    if error != 0.0 && rounding == FPRounding_ODD then
        int_mant<0> = '1';
    // Deal with overflow and generate result.
    if N != 16 || fpcr.AHP == '0' then  // Single, double or IEEE half precision
        if biased_exp >= 2^E - 1 then
            result = if overflow_to_inf then FPInfinity(sign) else FPMaxNormal(sign);
            if fpexc then FPProcessException(FPExc_Overflow, fpcr);
            error = 1.0;  // Ensure that an Inexact exception occurs
        else
            result = sign : biased_exp<E-1:0> : int_mant<F-1:0> : Zeros(N-(E+F+1));
    else  // Alternative half precision
        if biased_exp >= 2^E then
            result = sign : Ones(N-1);
            if fpexc then FPProcessException(FPExc_InvalidOp, fpcr);
            error = 0.0;  // Ensure that an Inexact exception does not occur
        else
            result = sign : biased_exp<E-1:0> : int_mant<F-1:0> : Zeros(N-(E+F+1));
    // Deal with Inexact exception.
    if error != 0.0 then
        if fpexc then FPProcessException(FPExc_Inexact, fpcr);
    return result;
// FPRoundCV()
// ===========
// Used for FP <-> FP conversion instructions.
// For half-precision data ignores FZ16 and observes AHP.

bits(N) FPRoundCV(real op, FPCRType fpcr, FPRounding rounding)
    fpcr.FZ16 = '0';
    boolean fpexc = TRUE;        // Generate floating-point exceptions
    boolean isbfloat16 = FALSE;
    return FPRoundBase(op, fpcr, rounding, isbfloat16, fpexc);

// FPRoundInt()
// ============
// Round op to nearest integral floating point value using rounding mode in FPCR/FPSCR.
// If EXACT is TRUE, set FPSR.IXC if result is not numerically equal to op.

bits(N) FPRoundInt(bits(N) op, FPCRType fpcr, FPRounding rounding, boolean exact)
    assert rounding != FPRounding_ODD;
    assert N IN {16,32,64};

    // When alternative floating-point support is TRUE, do not generate
    // Input Denormal floating-point exceptions.
    altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    fpexc = !altfp;

    // Unpack using FPCR to determine if subnormals are flushed-to-zero.
    (fptype,sign,value) = FPUnpack(op, fpcr, fpexc);

    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, op, fpcr);
    elsif fptype == FPType_Infinity then
        result = FPInfinity(sign);
    elsif fptype == FPType_Zero then
        result = FPZero(sign);
    else
        // Extract integer component.
        int_result = RoundDown(value);
        error = value - Real(int_result);

        // Determine whether supplied rounding mode requires an increment.
        case rounding of
            when FPRounding_TIEEVEN
                round_up = (error > 0.5 || (error == 0.5 && int_result<0> == '1'));
            when FPRounding_POSINF
                round_up = (error != 0.0);
            when FPRounding_NEGINF
                round_up = FALSE;
            when FPRounding_ZERO
                round_up = (error != 0.0 && int_result < 0);
            when FPRounding_TIEAWAY
                round_up = (error > 0.5 || (error == 0.5 && int_result >= 0));

        if round_up then int_result = int_result + 1;

        // Convert integer value into an equivalent real value.
        real_result = Real(int_result);

        // Re-encode as a floating-point value, result is always exact.
        if real_result == 0.0 then
            result = FPZero(sign);
        else
            result = FPRound(real_result, fpcr, FPRounding_ZERO);

        // Generate inexact exceptions.
        if error != 0.0 && exact then
            FPProcessException(FPExc_Inexact, fpcr);

    return result;
enumeration FPRounding  {FPRounding_TIEEVEN, FPRounding_POSINF,
                         FPRounding_NEGINF,  FPRounding_ZERO,
                         FPRounding_TIEAWAY, FPRounding_ODD};

// FPRoundIntN()
// =============
// Round op to an integral floating point value, returning the saturating
// NaN/overflow encoding for values unrepresentable in intsize signed bits.

bits(N) FPRoundIntN(bits(N) op, FPCRType fpcr, FPRounding rounding, integer intsize)
    assert rounding != FPRounding_ODD;
    assert N IN {32,64};
    assert intsize IN {32, 64};
    integer exp;
    constant integer E = (if N == 32 then 8 else 11);
    constant integer F = N - (E + 1);

    // When alternative floating-point support is TRUE, do not generate
    // Input Denormal floating-point exceptions.
    altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    fpexc = !altfp;

    // Unpack using FPCR to determine if subnormals are flushed-to-zero.
    (fptype,sign,value) = FPUnpack(op, fpcr, fpexc);

    if fptype IN {FPType_SNaN, FPType_QNaN, FPType_Infinity} then
        // NaNs and infinities produce the most negative representable value.
        if N == 32 then
            exp = 126 + intsize;
            result = '1':exp<(E-1):0>:Zeros(F);
        else
            exp = 1022+intsize;
            result = '1':exp<(E-1):0>:Zeros(F);
        FPProcessException(FPExc_InvalidOp, fpcr);
    elsif fptype == FPType_Zero then
        result = FPZero(sign);
    else
        // Extract integer component.
        int_result = RoundDown(value);
        error = value - Real(int_result);

        // Determine whether supplied rounding mode requires an increment.
        case rounding of
            when FPRounding_TIEEVEN
                round_up = error > 0.5 || (error == 0.5 && int_result<0> == '1');
            when FPRounding_POSINF
                round_up = error != 0.0;
            when FPRounding_NEGINF
                round_up = FALSE;
            when FPRounding_ZERO
                round_up = error != 0.0 && int_result < 0;
            when FPRounding_TIEAWAY
                round_up = error > 0.5 || (error == 0.5 && int_result >= 0);

        if round_up then int_result = int_result + 1;

        overflow = int_result > 2^(intsize-1)-1 || int_result < -1*2^(intsize-1);

        if overflow then
            if N == 32 then
                exp = 126 + intsize;
                result = '1':exp<(E-1):0>:Zeros(F);
            else
                exp = 1022 + intsize;
                result = '1':exp<(E-1):0>:Zeros(F);
            FPProcessException(FPExc_InvalidOp, fpcr);
            // This case shouldn't set Inexact.
            error = 0.0;
        else
            // Convert integer value into an equivalent real value.
            real_result = Real(int_result);

            // Re-encode as a floating-point value, result is always exact.
            if real_result == 0.0 then
                result = FPZero(sign);
            else
                result = FPRound(real_result, fpcr, FPRounding_ZERO);

        // Generate inexact exceptions.
        if error != 0.0 then
            FPProcessException(FPExc_Inexact, fpcr);

    return result;
// FPRoundingMode()
// ================
// Return the current floating-point rounding mode.

FPRounding FPRoundingMode(FPCRType fpcr)
    return FPDecodeRounding(fpcr.RMode);

// FPRSqrtEstimate()
// =================
// Compute the reciprocal square root estimate of an N-bit floating-point value.

bits(N) FPRSqrtEstimate(bits(N) operand, FPCRType fpcr)
    assert N IN {16,32,64};

    // When using alternative floating-point behaviour, do not generate
    // floating-point exceptions and flush denormal input to zero.
    boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    boolean fpexc = !altfp;
    if altfp then fpcr.<FIZ,FZ> = '11';

    (fptype,sign,value) = FPUnpack(operand, fpcr, fpexc);
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, operand, fpcr, fpexc);
    elsif fptype == FPType_Zero then
        result = FPInfinity(sign);
        if fpexc then FPProcessException(FPExc_DivideByZero, fpcr);
    elsif sign == '1' then
        result = FPDefaultNaN();
        if fpexc then FPProcessException(FPExc_InvalidOp, fpcr);
    elsif fptype == FPType_Infinity then
        result = FPZero('0');
    else
        // Scale to a fixed-point value in the range 0.25 <= x < 1.0 in steps of 512, with the
        // evenness or oddness of the exponent unchanged, and calculate result exponent.
        // Scaled value has copied sign bit, exponent = 1022 or 1021 = double-precision
        // biased version of -1 or -2, fraction = original fraction extended with zeros.

        case N of
            when 16
                fraction = operand<9:0> : Zeros(42);
                exp = UInt(operand<14:10>);
            when 32
                fraction = operand<22:0> : Zeros(29);
                exp = UInt(operand<30:23>);
            when 64
                fraction = operand<51:0>;
                exp = UInt(operand<62:52>);

        if exp == 0 then
            while fraction<51> == '0' do
                fraction = fraction<50:0> : '0';
                exp = exp - 1;
            fraction = fraction<50:0> : '0';

        integer scaled;
        boolean increasedprecision = N==32 && HaveFeatRPRES() && altfp;
        if !increasedprecision then
            if exp<0> == '0' then
                scaled = UInt('1':fraction<51:44>);
            else
                scaled = UInt('01':fraction<51:45>);
        else
            if exp<0> == '0' then
                scaled = UInt('1':fraction<51:41>);
            else
                scaled = UInt('01':fraction<51:42>);

        case N of
            when 16 result_exp = (  44 - exp) DIV 2;
            when 32 result_exp = ( 380 - exp) DIV 2;
            when 64 result_exp = (3068 - exp) DIV 2;

        estimate = RecipSqrtEstimate(scaled, increasedprecision);

        // Estimate is in the range 256 .. 511 or 4096 .. 8191 representing a
        // fixed-point result in the range [1.0 .. 2.0].
        // Convert to scaled floating point result with copied sign bit and high-order
        // fraction bits, and exponent calculated above.

        case N of
            when 16 result = '0' : result_exp<N-12:0> : estimate<7:0>:Zeros(2);
            when 32
                if !increasedprecision then
                    result = '0' : result_exp<N-25:0> : estimate<7:0>:Zeros(15);
                else
                    result = '0' : result_exp<N-25:0> : estimate<11:0>:Zeros(11);
            when 64 result = '0' : result_exp<N-54:0> : estimate<7:0>:Zeros(44);

    return result;
// RecipSqrtEstimate()
// ===================
// Compute estimate of reciprocal square root of 9-bit fixed-point number.
//
// a is in range 128 .. 511 or 1024 .. 4095, with increased precision,
// representing a number in the range 0.25 <= x < 1.0.
// increasedprecision determines if the mantissa is 8-bit or 12-bit.
// result is in the range 256 .. 511 or 4096 .. 8191, with increased precision,
// representing a number in the range 1.0 to 511/256 or 8191/4096.

integer RecipSqrtEstimate(integer a, boolean increasedprecision)
    integer r;
    if !increasedprecision then
        assert 128 <= a && a < 512;
        if a < 256 then                     // 0.25 .. 0.5
            a = a*2+1;                      // a in units of 1/512 rounded to nearest
        else                                // 0.5 .. 1.0
            a = (a >> 1) << 1;              // Discard bottom bit
            a = (a+1)*2;                    // a in units of 1/256 rounded to nearest
        integer b = 512;
        while a*(b+1)*(b+1) < 2^28 do
            b = b+1;
        // b = largest b such that b < 2^14 / sqrt(a)
        r = (b+1) DIV 2;                    // Round to nearest
        assert 256 <= r && r < 512;
    else
        assert 1024 <= a && a < 4096;
        real real_val;
        real error;
        integer int_val;
        if a < 2048 then                    // 0.25... 0.5
            a = a*2 + 1;                    // Take 10 bits of fraction and force a 1 at the bottom
            real_val = Real(a)/2.0;
        else                                // 0.5..1.0
            a = (a >> 1) << 1;              // Discard bottom bit
            a = a+1;                        // Taking 10 bits of the fraction and force a 1 at the bottom
            real_val = Real(a);
        real_val = Sqrt(real_val);          // This number will lie in the range of 32 to 64
        // Round to nearest even for a DP float number
        real_val = real_val * Real(2^47);   // The integer is the size of the whole DP mantissa
        int_val = RoundDown(real_val);      // Calculate rounding value
        error = real_val - Real(int_val);
        round_up = error > 0.5;             // Error cannot be exactly 0.5 so do not need tie case
        if round_up then int_val = int_val+1;
        real_val = Real(2^65)/Real(int_val);  // Lies in the range 4096 <= real_val < 8192
        int_val = RoundDown(real_val);        // Round that (to nearest even) to give integer
        error = real_val - Real(int_val);
        round_up = (error > 0.5 || (error == 0.5 && int_val<0> == '1'));
        if round_up then int_val = int_val+1;
        r = int_val;
        assert 4096 <= r && r < 8192;
    return r;
// FPSqrt()
// ========
// Floating-point square root of op, using rounding control from fpcr.

bits(N) FPSqrt(bits(N) op, FPCRType fpcr)
    assert N IN {16,32,64};
    (fptype,sign,value) = FPUnpack(op, fpcr);

    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, op, fpcr);
    elsif fptype == FPType_Zero then
        result = FPZero(sign);
    elsif fptype == FPType_Infinity && sign == '0' then
        result = FPInfinity(sign);
    elsif sign == '1' then
        // Square root of a negative number is invalid.
        result = FPDefaultNaN();
        FPProcessException(FPExc_InvalidOp, fpcr);
    else
        result = FPRound(Sqrt(value), fpcr);
        FPProcessDenorm(fptype, N, fpcr);
    return result;
// FPSub()
// =======
// Floating-point subtract (op1 - op2) with IEEE special-case handling.

bits(N) FPSub(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    rounding = FPRoundingMode(fpcr);
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1  = (type1 == FPType_Infinity);
        inf2  = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);
        zero2 = (type2 == FPType_Zero);
        if inf1 && inf2 && sign1 == sign2 then
            // Inf - Inf of the same sign is invalid.
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
        elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '1') then
            result = FPInfinity('0');
        elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '0') then
            result = FPInfinity('1');
        elsif zero1 && zero2 && sign1 == NOT(sign2) then
            result = FPZero(sign1);
        else
            result_value = value1 - value2;
            if result_value == 0.0 then  // Sign of exact zero result depends on rounding mode
                result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
                result = FPZero(result_sign);
            else
                result = FPRound(result_value, fpcr, rounding);
        FPProcessDenorms(type1, type2, N, fpcr);
    return result;
// FPThree()
// =========
// Return the N-bit floating-point encoding of 3.0 with the given sign.

bits(N) FPThree(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    exp  = '1':Zeros(E-1);   // Biased exponent of 1 (value 2^1)
    frac = '1':Zeros(F-1);   // Mantissa 1.5, giving 2 * 1.5 = 3.0
    result = sign : exp : frac;
    return result;
// FPToFixed()
// ===========
// Convert N-bit precision floating point OP to M-bit fixed point with
// FBITS fractional bits, controlled by UNSIGNED and ROUNDING.

bits(M) FPToFixed(bits(N) op, integer fbits, boolean unsigned, FPCRType fpcr, FPRounding rounding)
    assert N IN {16,32,64};
    assert M IN {16,32,64};
    assert fbits >= 0;
    assert rounding != FPRounding_ODD;

    // When alternative floating-point support is TRUE, do not generate
    // Input Denormal floating-point exceptions.
    altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1';
    fpexc = !altfp;

    // Unpack using fpcr to determine if subnormals are flushed-to-zero.
    (fptype,sign,value) = FPUnpack(op, fpcr, fpexc);

    // If NaN, set cumulative flag or take exception.
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        FPProcessException(FPExc_InvalidOp, fpcr);

    // Scale by fractional bits and produce integer rounded towards minus-infinity.
    value = value * 2.0^fbits;
    int_result = RoundDown(value);
    error = value - Real(int_result);

    // Determine whether supplied rounding mode requires an increment.
    case rounding of
        when FPRounding_TIEEVEN
            round_up = (error > 0.5 || (error == 0.5 && int_result<0> == '1'));
        when FPRounding_POSINF
            round_up = (error != 0.0);
        when FPRounding_NEGINF
            round_up = FALSE;
        when FPRounding_ZERO
            round_up = (error != 0.0 && int_result < 0);
        when FPRounding_TIEAWAY
            round_up = (error > 0.5 || (error == 0.5 && int_result >= 0));

    if round_up then int_result = int_result + 1;

    // Generate saturated result and exceptions.
    (result, overflow) = SatQ(int_result, M, unsigned);
    if overflow then
        FPProcessException(FPExc_InvalidOp, fpcr);
    elsif error != 0.0 then
        FPProcessException(FPExc_Inexact, fpcr);

    return result;
// FPToFixedJS()
// =============
// Converts a double precision floating point input value
// to a signed integer, with rounding to zero.

(bits(N), bit) FPToFixedJS(bits(M) op, FPCRType fpcr, boolean Is64)
    assert M == 64 && N == 32;

    // If FALSE, never generate Input Denormal floating-point exceptions.
    fpexc_idenorm = !(HaveAltFP() && !UsingAArch32() && fpcr.AH == '1');

    // Unpack using fpcr to determine if subnormals are flushed-to-zero.
    (fptype,sign,value) = FPUnpack(op, fpcr, fpexc_idenorm);

    Z = '1';
    // If NaN, set cumulative flag or take exception.
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        FPProcessException(FPExc_InvalidOp, fpcr);
        Z = '0';

    int_result = RoundDown(value);
    error = value - Real(int_result);

    // Determine whether supplied rounding mode requires an increment.
    round_it_up = (error != 0.0 && int_result < 0);
    if round_it_up then int_result = int_result + 1;

    if int_result < 0 then
        result = int_result - 2^32*RoundUp(Real(int_result)/Real(2^32));
    else
        result = int_result - 2^32*RoundDown(Real(int_result)/Real(2^32));

    // Generate exceptions.
    if int_result < -(2^31) || int_result > (2^31)-1 then
        FPProcessException(FPExc_InvalidOp, fpcr);
        Z = '0';
    elsif error != 0.0 then
        FPProcessException(FPExc_Inexact, fpcr);
        Z = '0';
    elsif sign == '1' && value == 0.0 then
        Z = '0';
    elsif sign == '0' && value == 0.0 && !IsZero(op<51:0>) then
        Z = '0';
    if fptype == FPType_Infinity then result = 0;

    return (result<N-1:0>, Z);
// FPTwo()
// =======
// Return the N-bit floating-point encoding of 2.0 with the given sign.

bits(N) FPTwo(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    exp  = '1':Zeros(E-1);   // Biased exponent of 1 (value 2^1)
    frac = Zeros(F);         // Mantissa 1.0, giving 2 * 1.0 = 2.0
    result = sign : exp : frac;
    return result;
enumeration FPType {FPType_Zero,     FPType_Denormal, FPType_Nonzero,
                    FPType_Infinity, FPType_QNaN,     FPType_SNaN};
// FPUnpack()
// ==========

(FPType, bit, real) FPUnpack(bits(N) fpval, FPCRType fpcr)
    fpcr.AHP = '0';
    boolean fpexc = TRUE;    // Generate floating-point exceptions
    (fp_type, sign, value) = FPUnpackBase(fpval, fpcr, fpexc);
    return (fp_type, sign, value);

// FPUnpack()
// ==========
//
// Used by data processing and int/fixed <-> FP conversion instructions.
// For half-precision data it ignores AHP, and observes FZ16.

(FPType, bit, real) FPUnpack(bits(N) fpval, FPCRType fpcr, boolean fpexc)
    fpcr.AHP = '0';
    (fp_type, sign, value) = FPUnpackBase(fpval, fpcr, fpexc);
    return (fp_type, sign, value);
// FPUnpackBase()
// ==============

(FPType, bit, real) FPUnpackBase(bits(N) fpval, FPCRType fpcr)
    boolean fpexc = TRUE;    // Generate floating-point exceptions
    (fp_type, sign, value) = FPUnpackBase(fpval, fpcr, fpexc);
    return (fp_type, sign, value);
// FPUnpackBase()
// ==============
//
// Unpack a floating-point number into its type, sign bit and the real number
// that it represents. The real number result has the correct sign for numbers
// and infinities, is very large in magnitude for infinities, and is 0.0 for
// NaNs. (These values are chosen to simplify the description of comparisons
// and conversions.)
//
// The 'fpcr' argument supplies FPCR control bits and 'fpexc' controls the
// generation of floating-point exceptions. Status information is updated
// directly in the FPSR where appropriate.

(FPType, bit, real) FPUnpackBase(bits(N) fpval, FPCRType fpcr, boolean fpexc)
    assert N IN {16,32,64};

    boolean altfp = HaveAltFP() && !UsingAArch32();
    boolean fiz   = altfp && fpcr.FIZ == '1';
    boolean fz    = fpcr.FZ == '1' && !(altfp && fpcr.AH == '1');
    if N == 16 then
        sign   = fpval<15>;
        exp16  = fpval<14:10>;
        frac16 = fpval<9:0>;
        if IsZero(exp16) then
            if IsZero(frac16) || fpcr.FZ16 == '1' then
                fptype = FPType_Zero;  value = 0.0;
            else
                fptype = FPType_Denormal;  value = 2.0^-14 * (Real(UInt(frac16)) * 2.0^-10);
        elsif IsOnes(exp16) && fpcr.AHP == '0' then  // Infinity or NaN in IEEE format
            if IsZero(frac16) then
                fptype = FPType_Infinity;  value = 2.0^1000000;
            else
                fptype = if frac16<9> == '1' then FPType_QNaN else FPType_SNaN;
                value = 0.0;
        else
            fptype = FPType_Nonzero;
            value = 2.0^(UInt(exp16)-15) * (1.0 + Real(UInt(frac16)) * 2.0^-10);

    elsif N == 32 then

        sign   = fpval<31>;
        exp32  = fpval<30:23>;
        frac32 = fpval<22:0>;
        if IsZero(exp32) then
            if IsZero(frac32) then
                // Produce zero if value is zero.
                fptype = FPType_Zero;  value = 0.0;
            elsif fz || fiz then  // Flush-to-zero if FIZ==1 or AH,FZ==01
                fptype = FPType_Zero;  value = 0.0;
                // Check whether to raise Input Denormal floating-point exception.
                // fpcr.FIZ==1 does not raise Input Denormal exception.
                if fz then
                    // Denormalized input flushed to zero
                    if fpexc then FPProcessException(FPExc_InputDenorm, fpcr);
            else
                fptype = FPType_Denormal;  value = 2.0^-126 * (Real(UInt(frac32)) * 2.0^-23);
        elsif IsOnes(exp32) then
            if IsZero(frac32) then
                fptype = FPType_Infinity;  value = 2.0^1000000;
            else
                fptype = if frac32<22> == '1' then FPType_QNaN else FPType_SNaN;
                value = 0.0;
        else
            fptype = FPType_Nonzero;
            value = 2.0^(UInt(exp32)-127) * (1.0 + Real(UInt(frac32)) * 2.0^-23);

    else // N == 64

        sign   = fpval<63>;
        exp64  = fpval<62:52>;
        frac64 = fpval<51:0>;
        if IsZero(exp64) then
            if IsZero(frac64) then
                // Produce zero if value is zero.
                fptype = FPType_Zero;  value = 0.0;
            elsif fz || fiz then  // Flush-to-zero if FIZ==1 or AH,FZ==01
                fptype = FPType_Zero;  value = 0.0;
                // Check whether to raise Input Denormal floating-point exception.
                // fpcr.FIZ==1 does not raise Input Denormal exception.
                if fz then
                    // Denormalized input flushed to zero
                    if fpexc then FPProcessException(FPExc_InputDenorm, fpcr);
            else
                fptype = FPType_Denormal;  value = 2.0^-1022 * (Real(UInt(frac64)) * 2.0^-52);
        elsif IsOnes(exp64) then
            if IsZero(frac64) then
                fptype = FPType_Infinity;  value = 2.0^1000000;
            else
                fptype = if frac64<51> == '1' then FPType_QNaN else FPType_SNaN;
                value = 0.0;
        else
            fptype = FPType_Nonzero;
            value = 2.0^(UInt(exp64)-1023) * (1.0 + Real(UInt(frac64)) * 2.0^-52);

    if sign == '1' then value = -value;

    return (fptype, sign, value);
enumeration FPType {FPType_Zero,
                    FPType_Denormal,
                    FPType_Nonzero,
                    FPType_Infinity,
                    FPType_QNaN,
                    FPType_SNaN};

// FPUnpackCV()
// ============
//
// Used for FP <-> FP conversion instructions.
// For half-precision data ignores FZ16 and observes AHP.

(FPType, bit, real) FPUnpackCV(bits(N) fpval, FPCRType fpcr)
    fpcr.FZ16 = '0';          // FZ16 is ignored for FP <-> FP conversions
    boolean fpexc = TRUE;     // Generate floating-point exceptions
    (fp_type, sign, value) = FPUnpackBase(fpval, fpcr, fpexc);
    return (fp_type, sign, value);
// FPUnpack()
// ==========
//
// Used by data processing and int/fixed <-> FP conversion instructions.
// For half-precision data it ignores AHP, and observes FZ16.
// This overload always generates floating-point exceptions.

(FPType, bit, real) FPUnpack(bits(N) fpval, FPCRType fpcr)
    fpcr.AHP = '0';
    boolean fpexc = TRUE;     // Generate floating-point exceptions
    (fp_type, sign, value) = FPUnpackBase(fpval, fpcr, fpexc);
    return (fp_type, sign, value);

// FPUnpack()
// ==========
//
// Used by data processing and int/fixed <-> FP conversion instructions.
// For half-precision data it ignores AHP, and observes FZ16.
// 'fpexc' controls the generation of floating-point exceptions.

(FPType, bit, real) FPUnpack(bits(N) fpval, FPCRType fpcr, boolean fpexc)
    fpcr.AHP = '0';
    (fp_type, sign, value) = FPUnpackBase(fpval, fpcr, fpexc);
    return (fp_type, sign, value);

// FPZero()
// ========
// Return the floating-point encoding of 0.0 with the given sign.

bits(N) FPZero(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    exp  = Zeros(E);
    frac = Zeros(F);
    result = sign : exp : frac;
    return result;
// VFPExpandImm()
// ==============
// Expand an 8-bit immediate (sign:exp-seed:fraction nibble) into an N-bit
// floating-point constant, as used by FMOV/VMOV (immediate).

bits(N) VFPExpandImm(bits(8) imm8)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - E - 1;
    sign = imm8<7>;
    exp  = NOT(imm8<6>):Replicate(imm8<6>,E-3):imm8<5:4>;
    frac = imm8<3:0>:Zeros(F-4);
    result = sign : exp : frac;
    return result;
// FPUnpackBase()
// ==============
//
// Unpack a floating-point number into its type, sign bit and the real number
// that it represents. The real number result has the correct sign for numbers
// and infinities, is very large in magnitude for infinities, and is 0.0 for
// NaNs. (These values are chosen to simplify the description of comparisons
// and conversions.)
//
// The 'fpcr' argument supplies FPCR control bits and 'fpexc' controls the
// generation of floating-point exceptions. Status information is updated
// directly in the FPSR where appropriate.

(FPType, bit, real) FPUnpackBase(bits(N) fpval, FPCRType fpcr, boolean fpexc)
    assert N IN {16,32,64};

    boolean altfp = HaveAltFP() && !UsingAArch32();
    boolean fiz = altfp && fpcr.FIZ == '1';
    boolean fz = fpcr.FZ == '1' && !(altfp && fpcr.AH == '1');
    if N == 16 then
        sign   = fpval<15>;
        exp16  = fpval<14:10>;
        frac16 = fpval<9:0>;
        if IsZero(exp16) then
            if IsZero(frac16) || fpcr.FZ16 == '1' then
                fptype = FPType_Zero;  value = 0.0;
            else
                fptype = FPType_Denormal;  value = 2.0^-14 * (Real(UInt(frac16)) * 2.0^-10);
        elsif IsOnes(exp16) && fpcr.AHP == '0' then  // Infinity or NaN in IEEE format
            if IsZero(frac16) then
                fptype = FPType_Infinity;  value = 2.0^1000000;
            else
                fptype = if frac16<9> == '1' then FPType_QNaN else FPType_SNaN;
                value = 0.0;
        else
            fptype = FPType_Nonzero;
            value = 2.0^(UInt(exp16)-15) * (1.0 + Real(UInt(frac16)) * 2.0^-10);
    elsif N == 32 then
        sign   = fpval<31>;
        exp32  = fpval<30:23>;
        frac32 = fpval<22:0>;
        if IsZero(exp32) then
            if IsZero(frac32) then
                // Produce zero if value is zero.
                fptype = FPType_Zero;  value = 0.0;
            elsif fz || fiz then   // Flush-to-zero if FIZ==1 or AH,FZ==01
                fptype = FPType_Zero;  value = 0.0;
                // Check whether to raise Input Denormal floating-point exception.
                // fpcr.FIZ==1 does not raise Input Denormal exception.
                if fz then
                    // Denormalized input flushed to zero
                    if fpexc then FPProcessException(FPExc_InputDenorm, fpcr);
            else
                fptype = FPType_Denormal;  value = 2.0^-126 * (Real(UInt(frac32)) * 2.0^-23);
        elsif IsOnes(exp32) then
            if IsZero(frac32) then
                fptype = FPType_Infinity;  value = 2.0^1000000;
            else
                fptype = if frac32<22> == '1' then FPType_QNaN else FPType_SNaN;
                value = 0.0;
        else
            fptype = FPType_Nonzero;
            value = 2.0^(UInt(exp32)-127) * (1.0 + Real(UInt(frac32)) * 2.0^-23);
    else // N == 64
        sign   = fpval<63>;
        exp64  = fpval<62:52>;
        frac64 = fpval<51:0>;
        if IsZero(exp64) then
            if IsZero(frac64) then
                // Produce zero if value is zero.
                fptype = FPType_Zero;  value = 0.0;
            elsif fz || fiz then   // Flush-to-zero if FIZ==1 or AH,FZ==01
                fptype = FPType_Zero;  value = 0.0;
                // Check whether to raise Input Denormal floating-point exception.
                // fpcr.FIZ==1 does not raise Input Denormal exception.
                if fz then
                    // Denormalized input flushed to zero
                    if fpexc then FPProcessException(FPExc_InputDenorm, fpcr);
            else
                fptype = FPType_Denormal;  value = 2.0^-1022 * (Real(UInt(frac64)) * 2.0^-52);
        elsif IsOnes(exp64) then
            if IsZero(frac64) then
                fptype = FPType_Infinity;  value = 2.0^1000000;
            else
                fptype = if frac64<51> == '1' then FPType_QNaN else FPType_SNaN;
                value = 0.0;
        else
            fptype = FPType_Nonzero;
            value = 2.0^(UInt(exp64)-1023) * (1.0 + Real(UInt(frac64)) * 2.0^-52);

    if sign == '1' then value = -value;
    return (fptype, sign, value);
// AddWithCarry()
// ==============
// Integer addition with carry input, returning result and NZCV flags

(bits(N), bits(4)) AddWithCarry(bits(N) x, bits(N) y, bit carry_in)
    integer unsigned_sum = UInt(x) + UInt(y) + UInt(carry_in);
    integer signed_sum   = SInt(x) + SInt(y) + UInt(carry_in);
    bits(N) result = unsigned_sum<N-1:0>; // same value as signed_sum<N-1:0>
    bit n = result<N-1>;
    bit z = if IsZero(result) then '1' else '0';
    // Carry out: set when the unsigned sum does not fit in N bits
    bit c = if UInt(result) == unsigned_sum then '0' else '1';
    // Overflow: set when the signed sum does not fit in N bits
    bit v = if SInt(result) == signed_sum then '0' else '1';
    return (result, n:z:c:v);
// AArch64.BranchAddr()
// ====================
// Return the virtual address with tag bits removed for storing to the program counter.

bits(64) AArch64.BranchAddr(bits(64) vaddress)
    assert !UsingAArch32();
    msbit = AddrTop(vaddress, TRUE, PSTATE.EL);
    if msbit == 63 then
        return vaddress;
    elsif (PSTATE.EL IN {EL0, EL1} || IsInHost()) && vaddress<msbit> == '1' then
        return SignExtend(vaddress<msbit:0>);
    else
        return ZeroExtend(vaddress<msbit:0>);
// NOTE(review): the source diff showed two versions of this enumeration (one with
// AccType_DC_UNPRIV, one with AccType_A32LSMD/AccType_ATPAN); the member list below
// is their union — confirm against the target architecture version.
enumeration AccType {AccType_NORMAL, AccType_VEC,        // Normal loads and stores
                     AccType_STREAM, AccType_VECSTREAM,  // Streaming loads and stores
                     AccType_A32LSMD,                    // Load and store multiple
                     AccType_ATOMIC, AccType_ATOMICRW,   // Atomic loads and stores
                     AccType_ORDERED, AccType_ORDEREDRW, // Load-Acquire and Store-Release
                     AccType_ORDEREDATOMIC,              // Load-Acquire and Store-Release with atomic access
                     AccType_ORDEREDATOMICRW,
                     AccType_ATOMICLS64,                 // Atomic 64-byte loads and stores
                     AccType_LIMITEDORDERED,             // Load-LOAcquire and Store-LORelease
                     AccType_UNPRIV,                     // Load and store unprivileged
                     AccType_IFETCH,                     // Instruction fetch
                     AccType_TTW,                        // Translation table walk
                     AccType_NONFAULT,                   // Non-faulting loads
                     AccType_CNOTFIRST,                  // Contiguous FF load, not first element
                     AccType_NV2REGISTER,                // MRS/MSR instruction used at EL1 and which is converted
                                                         // to a memory access that uses the EL2 translation regime
                     // Other operations
                     AccType_DC,                         // Data cache maintenance
                     AccType_DC_UNPRIV,                  // Data cache maintenance instruction used at EL0
                     AccType_IC,                         // Instruction cache maintenance
                     AccType_DCZVA,                      // DC ZVA instructions
                     AccType_ATPAN,                      // Address translation with PAN permission checks
                     AccType_AT};                        // Address translation
// Descriptor for a memory access: access type, MPAM information and
// translation-table-walk context (see CreateAccessDescriptor[TTW]).
type AccessDescriptor is (
    AccType  acctype,
    MPAMinfo mpam,
    boolean  page_table_walk,
    boolean  secondstage,
    boolean  s2fs1walk,
    integer  level
)
// AddrTop()
// =========
// Return the MSB number of a virtual address in the stage 1 translation regime for "el".
// If EL1 is using AArch64 then addresses from EL0 using AArch32 are zero-extended to 64 bits.

integer AddrTop(bits(64) address, boolean IsInstr, bits(2) el)
    assert HaveEL(el);
    regime = S1TranslationRegime(el);
    if ELUsingAArch32(regime) then
        // AArch32 translation regime.
        return 31;
    else
        if EffectiveTBI(address, IsInstr, el) == '1' then
            return 55;   // Top-byte-ignore in effect: tag bits above bit 55
        else
            return 63;
// Result of an address translation: fault status, memory attributes and the
// physical/virtual address pair.
type AddressDescriptor is (
    FaultRecord      fault,      // fault.statuscode indicates whether the address is valid
    MemoryAttributes memattrs,
    FullAddress      paddress,
    bits(64)         vaddress
)
constant bits(2) MemHint_No  = '00'; // No Read-Allocate, No Write-Allocate
constant bits(2) MemHint_WA  = '01'; // No Read-Allocate, Write-Allocate
constant bits(2) MemHint_RA  = '10'; // Read-Allocate, No Write-Allocate
constant bits(2) MemHint_RWA = '11'; // Read-Allocate, Write-Allocate
// BigEndian()
// ===========
// Return TRUE if data accesses of the given access type are big-endian in the
// current state, derived from PSTATE.E (AArch32) or SCTLR E0E/EE (AArch64).

boolean BigEndian(AccType acctype)
    boolean bigend;
    if HaveNV2Ext() && acctype == AccType_NV2REGISTER then
        // NV2 register accesses use the EL2 translation regime's endianness
        return SCTLR_EL2.EE == '1';
    if UsingAArch32() then
        bigend = (PSTATE.E != '0');
    elsif PSTATE.EL == EL0 then
        bigend = (SCTLR[].E0E != '0');
    else
        bigend = (SCTLR[].EE != '0');
    return bigend;
// BigEndianReverse()
// ==================
// Reverse the byte order of a value by recursively swapping its two halves.

bits(width) BigEndianReverse (bits(width) value)
    assert width IN {8, 16, 32, 64, 128};
    integer half = width DIV 2;
    if width == 8 then return value;
    return BigEndianReverse(value<half-1:0>) : BigEndianReverse(value<width-1:half>);
constant bits(2) MemAttr_NC = '00'; // Non-cacheable
constant bits(2) MemAttr_WT = '10'; // Write-through
constant bits(2) MemAttr_WB = '11'; // Write-back
// CreateAccessDescriptor()
// ========================
// Build an AccessDescriptor for a non-translation-table-walk access of the
// given type, with MPAM information generated for the current EL.

AccessDescriptor CreateAccessDescriptor(AccType acctype)
    AccessDescriptor accdesc;
    accdesc.acctype = acctype;
    accdesc.mpam = GenMPAMcurEL(acctype);
    accdesc.page_table_walk = FALSE;
    return accdesc;
// CreateAccessDescriptorTTW()
// ===========================
// Build an AccessDescriptor for a translation table walk access, recording
// the stage, whether it is a stage 2 walk on behalf of stage 1, and the level.

AccessDescriptor CreateAccessDescriptorTTW(AccType acctype, boolean secondstage,
                                           boolean s2fs1walk, integer level)
    AccessDescriptor accdesc;
    accdesc.acctype = acctype;
    accdesc.mpam = GenMPAMcurEL(acctype);
    accdesc.page_table_walk = TRUE;
    accdesc.s2fs1walk = s2fs1walk;
    accdesc.secondstage = secondstage;
    accdesc.level = level;
    return accdesc;
// Barrier operations: the underlying behaviour is defined by the memory system
// architecture; only the prototypes are given here.
DataMemoryBarrier(MBReqDomain domain, MBReqTypes types);

DataSynchronizationBarrier(MBReqDomain domain, MBReqTypes types);
// Context for a hardware update of a translation table descriptor.
type DescriptorUpdate is (
    boolean           AF,       // AF needs to be set
    boolean           AP,       // AP[2] / S2AP[2] will be modified
    AddressDescriptor descaddr  // Descriptor to be updated
)
enumeration DeviceType {DeviceType_GRE, DeviceType_nGRE, DeviceType_nGnRE, DeviceType_nGnRnE};
// EffectiveTBI()
// ==============
// Returns the effective TBI in the AArch64 stage 1 translation regime for "el".

bit EffectiveTBI(bits(64) address, boolean IsInstr, bits(2) el)
    assert HaveEL(el);
    regime = S1TranslationRegime(el);
    assert(!ELUsingAArch32(regime));

    case regime of
        when EL1
            tbi = if address<55> == '1' then TCR_EL1.TBI1 else TCR_EL1.TBI0;
            if HavePACExt() then
                tbid = if address<55> == '1' then TCR_EL1.TBID1 else TCR_EL1.TBID0;
        when EL2
            if HaveVirtHostExt() && ELIsInHost(el) then
                tbi = if address<55> == '1' then TCR_EL2.TBI1 else TCR_EL2.TBI0;
                if HavePACExt() then
                    tbid = if address<55> == '1' then TCR_EL2.TBID1 else TCR_EL2.TBID0;
            else
                tbi = TCR_EL2.TBI;
                if HavePACExt() then tbid = TCR_EL2.TBID;
        when EL3
            tbi = TCR_EL3.TBI;
            if HavePACExt() then tbid = TCR_EL3.TBID;

    // TBID suppresses top-byte-ignore for instruction fetches when set
    return (if tbi == '1' && (!HavePACExt() || tbid == '0' || !IsInstr) then '1' else '0');
// EffectiveTCMA()
// ===============
// Returns the effective TCMA of a virtual address in the stage 1 translation regime for "el".

bit EffectiveTCMA(bits(64) address, bits(2) el)
    assert HaveEL(el);
    regime = S1TranslationRegime(el);
    assert(!ELUsingAArch32(regime));

    case regime of
        when EL1
            tcma = if address<55> == '1' then TCR_EL1.TCMA1 else TCR_EL1.TCMA0;
        when EL2
            if HaveVirtHostExt() && ELIsInHost(el) then
                tcma = if address<55> == '1' then TCR_EL2.TCMA1 else TCR_EL2.TCMA0;
            else
                tcma = TCR_EL2.TCMA;
        when EL3
            tcma = TCR_EL3.TCMA;

    return tcma;
enumeration Fault {Fault_None,
                   Fault_AccessFlag,
                   Fault_Alignment,
                   Fault_Background,
                   Fault_Domain,
                   Fault_Permission,
                   Fault_Translation,
                   Fault_AddressSize,
                   Fault_SyncExternal,
                   Fault_SyncExternalOnWalk,
                   Fault_SyncParity,
                   Fault_SyncParityOnWalk,
                   Fault_AsyncParity,
                   Fault_AsyncExternal,
                   Fault_Debug,
                   Fault_TLBConflict,
                   Fault_BranchTarget,
                   Fault_HWUpdateAccessFlag,
                   Fault_Lockdown,
                   Fault_Exclusive,
                   Fault_ICacheMaint};
type FaultRecord is (
    Fault       statuscode,   // Fault Status
    AccType     acctype,      // Type of access that faulted
    FullAddress ipaddress,    // Intermediate physical address
    boolean     s2fs1walk,    // Is on a Stage 1 translation table walk
    boolean     write,        // TRUE for a write, FALSE for a read
    integer     level,        // For translation, access flag and permission faults
    bit         extflag,      // IMPLEMENTATION DEFINED syndrome for external aborts
    boolean     secondstage,  // Is a Stage 2 abort
    bits(4)     domain,       // Domain number, AArch32 only
    bits(2)     errortype,    // [Armv8.2 RAS] AArch32 AET or AArch64 SET
    bits(4)     debugmoe      // Debug method of entry, from AArch32 only
)

type PARTIDtype = bits(16);
type PMGtype = bits(8);

// MPAM information attached to a memory access.
type MPAMinfo is (
    bit        mpam_ns,
    PARTIDtype partid,
    PMGtype    pmg
)
type FullAddress is (
    bits(52) address,
    bit      NS       // '0' = Secure, '1' = Non-secure
)
// Signals the memory system that memory accesses of type HINT to or from the specified address are
// likely in the near future. The memory system may take some action to speed up the memory
// accesses when they do occur, such as pre-loading the specified address into one or more
// caches as indicated by the innermost cache level target (0=L1, 1=L2, etc) and non-temporal hint
// stream. Any or all prefetch hints may be treated as a NOP. A prefetch hint must not cause a
// synchronous abort due to Alignment or Translation faults and the like. Its only effect on
// software-visible state should be on caches and TLBs associated with address, which must be
// accessible by reads, writes or execution, as defined in the translation regime of the current
// Exception level. It is guaranteed not to access Device memory.
// A Prefetch_EXEC hint must not result in an access that could not be performed by a speculative
// instruction fetch, therefore if all associated MMUs are disabled, then it cannot access any
// memory location that cannot be accessed by instruction fetches.
Hint_Prefetch(bits(64) address, PrefetchHint hint, integer target, boolean stream);
enumeration MBReqDomain {MBReqDomain_Nonshareable, MBReqDomain_InnerShareable,
                         MBReqDomain_OuterShareable, MBReqDomain_FullSystem};

enumeration MBReqTypes {MBReqTypes_Reads, MBReqTypes_Writes, MBReqTypes_All};
// Cacheability attributes and allocation hints for one cache level domain.
type MemAttrHints is (
    bits(2) attrs,     // See MemAttr_*, Cacheability attributes
    bits(2) hints,     // See MemHint_*, Allocation hints
    boolean transient
)
enumeration MemType {MemType_Normal, MemType_Device};

type MemoryAttributes is (
    MemType      memtype,
    DeviceType   device,         // For Device memory types
    MemAttrHints inner,          // Inner hints and attributes
    MemAttrHints outer,          // Outer hints and attributes
    boolean      tagged,         // Tagged access
    boolean      shareable,
    boolean      outershareable
)

type Permissions is (
    bits(3) ap,   // Access permission bits
    bit     xn,   // Execute-never bit
    bit     xxn,  // [Armv8.2] Extended execute-never bit for stage 2
    bit     pxn   // Privileged execute-never bit
)

enumeration PrefetchHint {Prefetch_READ, Prefetch_WRITE, Prefetch_EXEC};
// Speculative store bypass barriers: behaviour is defined by the memory system
// architecture; only the prototypes are given here.
SpeculativeStoreBypassBarrierToPA();

SpeculativeStoreBypassBarrierToVA();
type TLBRecord is (
    Permissions       perms,
    bit               nG,          // '0' = Global, '1' = not Global
    bits(4)           domain,      // AArch32 only
    bit               GP,          // Guarded Page
    boolean           contiguous,  // Contiguous bit from translation table
    integer           level,       // AArch32 Short-descriptor format: Indicates Section/Page
    integer           blocksize,   // Describes size of memory translated in KBytes
    DescriptorUpdate  descupdate,  // [Armv8.1] Context for h/w update of table descriptor
    bit               CnP,         // [Armv8.2] TLB entry can be shared between different PEs
    AddressDescriptor addrdesc
)

constant integer LOG2_TAG_GRANULE = 4;
constant integer TAG_GRANULE = 1 << LOG2_TAG_GRANULE;
// These two _Mem[] accessors are the hardware operations which perform single-copy atomic,
// aligned, little-endian memory accesses of size bytes from/to the underlying physical
// memory array of bytes.
//
// The functions address the array using desc.paddress which supplies:
// * A 52-bit physical address
// * A single NS bit to select between Secure and Non-secure parts of the array.
//
// The accdesc descriptor describes the access type: normal, exclusive, ordered, streaming,
// etc and other parameters required to access the physical memory or for setting syndrome
// register in the event of an external abort.
bits(8*size) _Mem[AddressDescriptor desc, integer size, AccessDescriptor accdesc];

_Mem[AddressDescriptor desc, integer size, AccessDescriptor accdesc] = bits(8*size) value;
// DefaultMPAMinfo
// ===============
// Returns default MPAM info.  If secure is TRUE return default Secure
// MPAMinfo, otherwise return default Non-secure MPAMinfo.

MPAMinfo DefaultMPAMinfo(boolean secure)
    MPAMinfo DefaultInfo;
    DefaultInfo.mpam_ns = if secure then '0' else '1';
    DefaultInfo.partid  = DefaultPARTID;
    DefaultInfo.pmg     = DefaultPMG;
    return DefaultInfo;
constant PARTIDtype DefaultPARTID = 0<15:0>;

constant PMGtype DefaultPMG = 0<7:0>;
// GenMPAMcurEL
// ============
// Returns MPAMinfo for the current EL and security state.
// May be called if MPAM is not implemented (but in an version that supports
// MPAM), MPAM is disabled, or in AArch32.  In AArch32, convert the mode to
// EL if can and use that to drive MPAM information generation.  If mode
// cannot be converted, MPAM is not implemented, or MPAM is disabled return
// default MPAM information for the current security state.

MPAMinfo GenMPAMcurEL(AccType acctype)
    bits(2) mpamel;
    boolean validEL;
    boolean securempam;
    // Instruction-side accesses (fetch and I-cache maintenance) use the InD space
    boolean InD = acctype IN { AccType_IFETCH, AccType_IC };
    if HaveEMPAMExt() then
        boolean secure = IsSecure();
        securempam = MPAM3_EL3.FORCE_NS == '0' && secure;
        if MPAMisEnabled() && (!secure || MPAM3_EL3.SDEFLT == '0') then
            if UsingAArch32() then
                (validEL, mpamel) = ELFromM32(PSTATE.M);
            else
                // NOTE(review): validEL assignment reconstructed from the second diff
                // column of the source — confirm against the published pseudocode.
                validEL = TRUE;
                mpamel = PSTATE.EL;
            if validEL then
                return genMPAM(UInt(mpamel), InD, securempam);
    else
        securempam = IsSecure();
        if HaveMPAMExt() && MPAMisEnabled() then
            if UsingAArch32() then
                (validEL, mpamel) = ELFromM32(PSTATE.M);
            else
                validEL = TRUE;
                mpamel = PSTATE.EL;
            if validEL then
                return genMPAM(UInt(mpamel), InD, securempam);
    return DefaultMPAMinfo(securempam);
// MAP_vPARTID
// ===========
// Performs conversion of virtual PARTID into physical PARTID
// Contains all of the error checking and implementation
// choices for the conversion.

(PARTIDtype, boolean) MAP_vPARTID(PARTIDtype vpartid)
    // should not ever be called if EL2 is not implemented
    // or is implemented but not enabled in the current
    // security state.
    PARTIDtype ret;
    boolean err;
    integer virt = UInt( vpartid );
    integer vpmrmax = UInt( MPAMIDR_EL1.VPMR_MAX );

    // vpartid_max is largest vpartid supported
    integer vpartid_max = (4 * vpmrmax) + 3;

    // One of many ways to reduce vpartid to value less than vpartid_max.
    if virt > vpartid_max then
        virt = virt MOD (vpartid_max+1);

    // Check for valid mapping entry.
    if MPAMVPMV_EL2<virt> == '1' then
        // vpartid has a valid mapping so access the map.
        ret = mapvpmw(virt);
        err = FALSE;

    // Is the default virtual PARTID valid?
    elsif MPAMVPMV_EL2<0> == '1' then
        // Yes, so use default mapping for vpartid == 0.
        ret = MPAMVPM0_EL2<0 +: 16>;
        err = FALSE;

    // Neither is valid so use default physical PARTID.
    else
        ret = DefaultPARTID;
        err = TRUE;

    // Check that the physical PARTID is in-range.
    // This physical PARTID came from a virtual mapping entry.
    integer partid_max = UInt( MPAMIDR_EL1.PARTID_MAX );
    if UInt(ret) > partid_max then
        // Out of range, so return default physical PARTID
        ret = DefaultPARTID;
        err = TRUE;
    return (ret, err);
// Default performance monitoring group.
constant PMGtype DefaultPMG = 0<7:0>;

// MPAMisEnabled
// =============
// Returns TRUE if MPAMisEnabled.
boolean MPAMisEnabled()
    // MPAM enable is controlled by the MPAMEN bit of the MPAMn_ELx register
    // at the highest implemented Exception level.
    el = HighestEL();
    case el of
        when EL3 return MPAM3_EL3.MPAMEN == '1';
        when EL2 return MPAM2_EL2.MPAMEN == '1';
        when EL1 return MPAM1_EL1.MPAMEN == '1';
// GenMPAMcurEL
// ============
// Returns MPAMinfo for the current EL and security state.
// May be called if MPAM is not implemented (but in an version that supports
// MPAM), MPAM is disabled, or in AArch32. In AArch32, convert the mode to
// EL if can and use that to drive MPAM information generation. If mode
// cannot be converted, MPAM is not implemented, or MPAM is disabled return
// default MPAM information for the current security state.
// MPAMisVirtual
// =============
// Returns TRUE if MPAM is configured to be virtual at EL.
boolean MPAMisVirtual(integer el)
    return ( MPAMIDR_EL1.HAS_HCR == '1' && EL2Enabled() &&
             (( el == 0 && MPAMHCR_EL2.EL0_VPMEN == '1' &&
                ( HCR_EL2.E2H == '0' || HCR_EL2.TGE == '0' )) ||
              ( el == 1 && MPAMHCR_EL2.EL1_VPMEN == '1')));
// MAP_vPARTID
// ===========
// Performs conversion of virtual PARTID into physical PARTID
// Contains all of the error checking and implementation
// choices for the conversion.
// genMPAM
// =======
// Returns MPAMinfo for exception level el.
// If InD is TRUE returns MPAM information using PARTID_I and PMG_I fields
// of MPAMel_ELx register and otherwise using PARTID_D and PMG_D fields.
// Produces a Secure PARTID if Secure is TRUE and a Non-secure PARTID otherwise.
MPAMinfo genMPAM(integer el, boolean InD, boolean secure)
    MPAMinfo returnInfo;
    PARTIDtype partidel;
    boolean perr;
    // Guest-OS application locked: EL0 requests are generated as EL1 requests.
    boolean gstplk = (el == 0 && EL2Enabled() &&
                      MPAMHCR_EL2.GSTAPP_PLK == '1' && HCR_EL2.TGE == '0');
    integer eff_el = if gstplk then 1 else el;
    (partidel, perr) = genPARTID(eff_el, InD);
    PMGtype groupel  = genPMG(eff_el, InD, perr);
    returnInfo.mpam_ns = if secure then '0' else '1';
    returnInfo.partid  = partidel;
    returnInfo.pmg     = groupel;
    return returnInfo;
// MPAMisEnabled
// =============
// Returns TRUE if MPAMisEnabled.
// genMPAMel
// =========
// Returns MPAMinfo for specified EL in the current security state.
// InD is TRUE for instruction access and FALSE otherwise.
MPAMinfo genMPAMel(bits(2) el, boolean InD)
    boolean secure = IsSecure();
    boolean securempam = secure;
    if HaveEMPAMExt() then
        securempam = MPAM3_EL3.FORCE_NS == '0' && secure;
        if MPAMisEnabled() && (!secure || MPAM3_EL3.SDEFLT == '0') then
            return genMPAM(UInt(el), InD, securempam);
    else
        if HaveMPAMExt() && MPAMisEnabled() then
            return genMPAM(UInt(el), InD, securempam);
    return DefaultMPAMinfo(securempam);
// MPAMisVirtual
// =============
// Returns TRUE if MPAM is configured to be virtual at EL.
// genPARTID
// =========
// Returns physical PARTID and error boolean for exception level el.
// If InD is TRUE then PARTID is from MPAMel_ELx.PARTID_I and
// otherwise from MPAMel_ELx.PARTID_D.
(PARTIDtype, boolean) genPARTID(integer el, boolean InD)
    PARTIDtype partidel = getMPAM_PARTID(el, InD);
    integer partid_max = UInt(MPAMIDR_EL1.PARTID_MAX);
    if UInt(partidel) > partid_max then
        // Out-of-range PARTID: substitute the default and flag the error.
        return (DefaultPARTID, TRUE);
    if MPAMisVirtual(el) then
        return MAP_vPARTID(partidel);
    else
        return (partidel, FALSE);
// genMPAM
// =======
// Returns MPAMinfo for exception level el.
// If InD is TRUE returns MPAM information using PARTID_I and PMG_I fields
// of MPAMel_ELx register and otherwise using PARTID_D and PMG_D fields.
// Produces a Secure PARTID if Secure is TRUE and a Non-secure PARTID otherwise.
// genPMG
// ======
// Returns PMG for exception level el and I- or D-side (InD).
// If PARTID generation (genPARTID) encountered an error, genPMG() should be
// called with partid_err as TRUE.
PMGtype genPMG(integer el, boolean InD, boolean partid_err)
    integer pmg_max = UInt(MPAMIDR_EL1.PMG_MAX);

    // It is CONSTRAINED UNPREDICTABLE whether partid_err forces PMG to
    // use the default or if it uses the PMG from getMPAM_PMG.
    if partid_err then
        return DefaultPMG;
    PMGtype groupel = getMPAM_PMG(el, InD);
    if UInt(groupel) <= pmg_max then
        return groupel;
    return DefaultPMG;
// genMPAMel
// =========
// Returns MPAMinfo for specified EL in the current security state.
// InD is TRUE for instruction access and FALSE otherwise.
// getMPAM_PARTID
// ==============
// Returns a PARTID from one of the MPAMn_ELx registers.
// MPAMn selects the MPAMn_ELx register used.
// If InD is TRUE, selects the PARTID_I field of that
// register.  Otherwise, selects the PARTID_D field.
PARTIDtype getMPAM_PARTID(integer MPAMn, boolean InD)
    PARTIDtype partid;
    boolean el2avail = EL2Enabled();

    if InD then
        case MPAMn of
            when 3 partid = MPAM3_EL3.PARTID_I;
            when 2 partid = if el2avail then MPAM2_EL2.PARTID_I else Zeros();
            when 1 partid = MPAM1_EL1.PARTID_I;
            when 0 partid = MPAM0_EL1.PARTID_I;
            otherwise partid = PARTIDtype UNKNOWN;
    else
        case MPAMn of
            when 3 partid = MPAM3_EL3.PARTID_D;
            when 2 partid = if el2avail then MPAM2_EL2.PARTID_D else Zeros();
            when 1 partid = MPAM1_EL1.PARTID_D;
            when 0 partid = MPAM0_EL1.PARTID_D;
            otherwise partid = PARTIDtype UNKNOWN;
    return partid;
// genPARTID
// =========
// Returns physical PARTID and error boolean for exception level el.
// If InD is TRUE then PARTID is from MPAMel_ELx.PARTID_I and
// otherwise from MPAMel_ELx.PARTID_D.
// getMPAM_PMG
// ===========
// Returns a PMG from one of the MPAMn_ELx registers.
// MPAMn selects the MPAMn_ELx register used.
// If InD is TRUE, selects the PMG_I field of that
// register.  Otherwise, selects the PMG_D field.
PMGtype getMPAM_PMG(integer MPAMn, boolean InD)
    PMGtype pmg;
    boolean el2avail = EL2Enabled();

    if InD then
        case MPAMn of
            when 3 pmg = MPAM3_EL3.PMG_I;
            when 2 pmg = if el2avail then MPAM2_EL2.PMG_I else Zeros();
            when 1 pmg = MPAM1_EL1.PMG_I;
            when 0 pmg = MPAM0_EL1.PMG_I;
            otherwise pmg = PMGtype UNKNOWN;
    else
        case MPAMn of
            when 3 pmg = MPAM3_EL3.PMG_D;
            when 2 pmg = if el2avail then MPAM2_EL2.PMG_D else Zeros();
            when 1 pmg = MPAM1_EL1.PMG_D;
            when 0 pmg = MPAM0_EL1.PMG_D;
            otherwise pmg = PMGtype UNKNOWN;
    return pmg;
// genPMG
// ======
// Returns PMG for exception level el and I- or D-side (InD).
// If PARTID generation (genPARTID) encountered an error, genPMG() should be
// called with partid_err as TRUE.
// mapvpmw
// =======
// Map a virtual PARTID into a physical PARTID using
// the MPAMVPMn_EL2 registers.
// vpartid is now assumed in-range and valid (checked by caller)
// returns physical PARTID from mapping entry.
PARTIDtype mapvpmw(integer vpartid)
    bits(64) vpmw;
    integer wd = vpartid DIV 4;   // four 16-bit mapping entries per register
    case wd of
        when 0 vpmw = MPAMVPM0_EL2;
        when 1 vpmw = MPAMVPM1_EL2;
        when 2 vpmw = MPAMVPM2_EL2;
        when 3 vpmw = MPAMVPM3_EL2;
        when 4 vpmw = MPAMVPM4_EL2;
        when 5 vpmw = MPAMVPM5_EL2;
        when 6 vpmw = MPAMVPM6_EL2;
        when 7 vpmw = MPAMVPM7_EL2;
        otherwise vpmw = Zeros(64);
    // vpme_lsb selects LSB of field within register
    integer vpme_lsb = (vpartid MOD 4) * 16;
    return vpmw<vpme_lsb +: 16>;
// getMPAM_PARTID
// ==============
// Returns a PARTID from one of the MPAMn_ELx registers.
// MPAMn selects the MPAMn_ELx register used.
// If InD is TRUE, selects the PARTID_I field of that
// register. Otherwise, selects the PARTID_D field.
// BranchTo()
// ==========
PARTIDtype// Set program counter to a new address, with a branch type
// In AArch64 state the address might include a tag in the top eight bits. getMPAM_PARTID(integer MPAMn, boolean InD)BranchTo(bits(N) target,
PARTIDtypeBranchType partid;
boolean el2avail =branch_type) EL2EnabledHint_Branch();
if InD then
case MPAMn of
when 3 partid = MPAM3_EL3.PARTID_I;
when 2 partid = if el2avail then MPAM2_EL2.PARTID_I else(branch_type);
if N == 32 then
assert ZerosUsingAArch32();
when 1 partid = MPAM1_EL1.PARTID_I;
when 0 partid = MPAM0_EL1.PARTID_I;
otherwise partid = _PC = PARTIDtypeZeroExtend UNKNOWN;
(target);
else
case MPAMn of
when 3 partid = MPAM3_EL3.PARTID_D;
when 2 partid = if el2avail then MPAM2_EL2.PARTID_D else assert N == 64 && ! ZerosUsingAArch32();
when 1 partid = MPAM1_EL1.PARTID_D;
when 0 partid = MPAM0_EL1.PARTID_D;
otherwise partid = _PC = PARTIDtypeAArch64.BranchAddr UNKNOWN;
return partid;(target<63:0>);
return;
// getMPAM_PMG
// ===========
// Returns a PMG from one of the MPAMn_ELx registers.
// MPAMn selects the MPAMn_ELx register used.
// If InD is TRUE, selects the PMG_I field of that
// register. Otherwise, selects the PMG_D field.
// BranchToAddr()
// ==============
PMGtype// Set program counter to a new address, with a branch type
// In AArch64 state the address does not include a tag in the top eight bits. getMPAM_PMG(integer MPAMn, boolean InD)BranchToAddr(bits(N) target,
PMGtypeBranchType pmg;
boolean el2avail =branch_type) EL2EnabledHint_Branch();
if InD then
case MPAMn of
when 3 pmg = MPAM3_EL3.PMG_I;
when 2 pmg = if el2avail then MPAM2_EL2.PMG_I else(branch_type);
if N == 32 then
assert ZerosUsingAArch32();
when 1 pmg = MPAM1_EL1.PMG_I;
when 0 pmg = MPAM0_EL1.PMG_I;
otherwise pmg = _PC = PMGtypeZeroExtend UNKNOWN;
(target);
else
case MPAMn of
when 3 pmg = MPAM3_EL3.PMG_D;
when 2 pmg = if el2avail then MPAM2_EL2.PMG_D else assert N == 64 && ! ZerosUsingAArch32();
when 1 pmg = MPAM1_EL1.PMG_D;
when 0 pmg = MPAM0_EL1.PMG_D;
otherwise pmg = PMGtype UNKNOWN;
return pmg;();
_PC = target<63:0>;
return;
// mapvpmw
// =======
// Map a virtual PARTID into a physical PARTID using
// the MPAMVPMn_EL2 registers.
// vpartid is now assumed in-range and valid (checked by caller)
// returns physical PARTID from mapping entry.
PARTIDtypeenumeration mapvpmw(integer vpartid)
bits(64) vpmw;
integer wd = vpartid DIV 4;
case wd of
when 0 vpmw = MPAMVPM0_EL2;
when 1 vpmw = MPAMVPM1_EL2;
when 2 vpmw = MPAMVPM2_EL2;
when 3 vpmw = MPAMVPM3_EL2;
when 4 vpmw = MPAMVPM4_EL2;
when 5 vpmw = MPAMVPM5_EL2;
when 6 vpmw = MPAMVPM6_EL2;
when 7 vpmw = MPAMVPM7_EL2;
otherwise vpmw =BranchType { BranchType_DIRCALL, // Direct Branch with link
BranchType_INDCALL, // Indirect Branch with link
BranchType_ERET, // Exception return (indirect)
BranchType_DBGEXIT, // Exit from Debug state
BranchType_RET, // Indirect branch with function return hint
BranchType_DIR, // Direct branch
BranchType_INDIR, // Indirect branch
BranchType_EXCEPTION, // Exception entry
BranchType_RESET, // Reset
Zeros(64);
// vpme_lsb selects LSB of field within register
integer vpme_lsb = (vpartid MOD 4) * 16;
return vpmw<vpme_lsb +: 16>;BranchType_UNKNOWN}; // Other
// BranchTo()
// ==========
// Set program counter to a new address, with a branch type
// In AArch64 state the address might include a tag in the top eight bits.
BranchTo(bits(N) target, BranchType branch_type)
    Hint_Branch(branch_type);
    if N == 32 then
        assert UsingAArch32();
        _PC = ZeroExtend(target);
    else
        assert N == 64 && !UsingAArch32();
        // AArch64.BranchAddr strips/handles any tag bits in the target.
        _PC = AArch64.BranchAddr(target<63:0>);
    return;
// BranchToAddr()
// ==============
// Set program counter to a new address, with a branch type
// In AArch64 state the address does not include a tag in the top eight bits.
BranchToAddr(bits(N) target, BranchType branch_type)
    Hint_Branch(branch_type);
    if N == 32 then
        assert UsingAArch32();
        _PC = ZeroExtend(target);
    else
        assert N == 64 && !UsingAArch32();
        _PC = target<63:0>;
    return;
enumeration BranchType {
    BranchType_DIRCALL,     // Direct Branch with link
    BranchType_INDCALL,     // Indirect Branch with link
    BranchType_ERET,        // Exception return (indirect)
    BranchType_DBGEXIT,     // Exit from Debug state
    BranchType_RET,         // Indirect branch with function return hint
    BranchType_DIR,         // Direct branch
    BranchType_INDIR,       // Indirect branch
    BranchType_EXCEPTION,   // Exception entry
    BranchType_RESET,       // Reset
    BranchType_UNKNOWN};    // Other
// Report the hint passed to BranchTo() and BranchToAddr(), for consideration when processing
// the next instruction.
Hint_Branch(BranchType hint);

// ThisInstrAddr()
// ===============
// Return address of the current instruction.
bits(N) ThisInstrAddr()
    assert N == 64 || (N == 32 && UsingAArch32());
    return _PC<N-1:0>;
// Return address of the sequentially next instruction.
bits(N) NextInstrAddr();

// Program counter.
bits(64) _PC;

// General-purpose register file X0-X30.
array bits(64) _R[0..30];

// Reset the External Debug registers in the Core power domain.
ResetExternalDebugRegisters(boolean cold_reset);
// ThisInstrAddr()
// ===============
// Return address of the current instruction.
// SPSR[] - non-assignment form
// ============================
// Read the Saved Program Status Register selected by the current AArch32
// mode (PSTATE.M) or the current AArch64 Exception level (PSTATE.EL).
bits(N) SPSR[]
    bits(N) result;
    if UsingAArch32() then
        assert N == 32;
        case PSTATE.M of
            when M32_FIQ      result = SPSR_fiq<N-1:0>;
            when M32_IRQ      result = SPSR_irq<N-1:0>;
            when M32_Svc      result = SPSR_svc<N-1:0>;
            when M32_Monitor  result = SPSR_mon<N-1:0>;
            when M32_Abort    result = SPSR_abt<N-1:0>;
            when M32_Hyp      result = SPSR_hyp<N-1:0>;
            when M32_Undef    result = SPSR_und<N-1:0>;
            otherwise         Unreachable();
    else
        assert N == 64;
        case PSTATE.EL of
            when EL1          result = SPSR_EL1<N-1:0>;
            when EL2          result = SPSR_EL2<N-1:0>;
            when EL3          result = SPSR_EL3<N-1:0>;
            otherwise         Unreachable();
    return result;
// SPSR[] - assignment form
// ========================
// Write the Saved Program Status Register selected by the current AArch32
// mode (PSTATE.M) or the current AArch64 Exception level (PSTATE.EL).
// The value is zero-extended to the width of the underlying register.
SPSR[] = bits(N) value
    if UsingAArch32() then
        assert N == 32;
        case PSTATE.M of
            when M32_FIQ      SPSR_fiq = ZeroExtend(value);
            when M32_IRQ      SPSR_irq = ZeroExtend(value);
            when M32_Svc      SPSR_svc = ZeroExtend(value);
            when M32_Monitor  SPSR_mon = ZeroExtend(value);
            when M32_Abort    SPSR_abt = ZeroExtend(value);
            when M32_Hyp      SPSR_hyp = ZeroExtend(value);
            when M32_Undef    SPSR_und = ZeroExtend(value);
            otherwise         Unreachable();
    else
        assert N == 64;
        case PSTATE.EL of
            when EL1          SPSR_EL1 = ZeroExtend(value);
            when EL2          SPSR_EL2 = ZeroExtend(value);
            when EL3          SPSR_EL3 = ZeroExtend(value);
            otherwise         Unreachable();
    return;
enumeration ArchVersion {
    ARMv8p0
  , ARMv8p1
  , ARMv8p2
  , ARMv8p3
  , ARMv8p4
  , ARMv8p5
  , ARMv8p6
  , ARMv8p7
};
// BranchTargetCheck()
// ===================
// This function is executed checks if the current instruction is a valid target for a branch
// taken into, or inside, a guarded page. It is executed on every cycle once the current
// instruction has been decoded and the values of InGuardedPage and BTypeCompatible have been
// determined for the current instruction.
BranchTargetCheck()
    assert HaveBTIExt() && !UsingAArch32();

    // The branch target check considers two state variables:
    // * InGuardedPage, which is evaluated during instruction fetch.
    // * BTypeCompatible, which is evaluated during instruction decode.
    if InGuardedPage && PSTATE.BTYPE != '00' && !BTypeCompatible && !Halted() then
        bits(64) pc = ThisInstrAddr();
        AArch64.BranchTargetException(pc<51:0>);

    boolean branch_instr = AArch64.ExecutingBROrBLROrRetInstr();
    boolean bti_instr    = AArch64.ExecutingBTIInstr();

    // PSTATE.BTYPE defaults to 00 for instructions that do not explictly set BTYPE.
    if !(branch_instr || bti_instr) then
        BTypeNext = '00';
// SPSR[] - non-assignment form
// ============================
bits(N)// ClearEventRegister()
// ====================
// Clear the Event Register of this PE. SPSR[]
bits(N) result;
ifClearEventRegister()
EventRegister = '0';
return; UsingAArch32() then
assert N == 32;
case PSTATE.M of
when M32_FIQ result = SPSR_fiq<N-1:0>;
when M32_IRQ result = SPSR_irq<N-1:0>;
when M32_Svc result = SPSR_svc<N-1:0>;
when M32_Monitor result = SPSR_mon<N-1:0>;
when M32_Abort result = SPSR_abt<N-1:0>;
when M32_Hyp result = SPSR_hyp<N-1:0>;
when M32_Undef result = SPSR_und<N-1:0>;
otherwise Unreachable();
else
assert N == 64;
case PSTATE.EL of
when EL1 result = SPSR_EL1<N-1:0>;
when EL2 result = SPSR_EL2<N-1:0>;
when EL3 result = SPSR_EL3<N-1:0>;
otherwise Unreachable();
return result;
// SPSR[] - assignment form
// ========================
SPSR[] = bits(N) value
if UsingAArch32() then
assert N == 32;
case PSTATE.M of
when M32_FIQ SPSR_fiq = ZeroExtend(value);
when M32_IRQ SPSR_irq = ZeroExtend(value);
when M32_Svc SPSR_svc = ZeroExtend(value);
when M32_Monitor SPSR_mon = ZeroExtend(value);
when M32_Abort SPSR_abt = ZeroExtend(value);
when M32_Hyp SPSR_hyp = ZeroExtend(value);
when M32_Undef SPSR_und = ZeroExtend(value);
otherwise Unreachable();
else
assert N == 64;
case PSTATE.EL of
when EL1 SPSR_EL1 = ZeroExtend(value);
when EL2 SPSR_EL2 = ZeroExtend(value);
when EL3 SPSR_EL3 = ZeroExtend(value);
otherwise Unreachable();
return;
// Clear a pending physical SError interrupt.
ClearPendingPhysicalSError();
// BranchTargetCheck()
// ===================
// This function is executed checks if the current instruction is a valid target for a branch
// taken into, or inside, a guarded page. It is executed on every cycle once the current
// instruction has been decoded and the values of InGuardedPage and BTypeCompatible have been
// determined for the current instruction.// Clear a pending virtual SError interrupt.
BranchTargetCheck()
assertClearPendingVirtualSError(); HaveBTIExt() && !UsingAArch32();
// The branch target check considers two state variables:
// * InGuardedPage, which is evaluated during instruction fetch.
// * BTypeCompatible, which is evaluated during instruction decode.
if InGuardedPage && PSTATE.BTYPE != '00' && !BTypeCompatible && !Halted() then
bits(64) pc = ThisInstrAddr();
AArch64.BranchTargetException(pc<51:0>);
boolean branch_instr = AArch64.ExecutingBROrBLROrRetInstr();
boolean bti_instr = AArch64.ExecutingBTIInstr();
// PSTATE.BTYPE defaults to 00 for instructions that do not explictly set BTYPE.
if !(branch_instr || bti_instr) then
BTypeNext = '00';
// ClearEventRegister()
// ====================
// Clear the Event Register of this PE.// ConditionHolds()
// ================
// Return TRUE iff COND currently holds
boolean
ClearEventRegister()
EventRegister = '0';
return;ConditionHolds(bits(4) cond)
// Evaluate base condition.
case cond<3:1> of
when '000' result = (PSTATE.Z == '1'); // EQ or NE
when '001' result = (PSTATE.C == '1'); // CS or CC
when '010' result = (PSTATE.N == '1'); // MI or PL
when '011' result = (PSTATE.V == '1'); // VS or VC
when '100' result = (PSTATE.C == '1' && PSTATE.Z == '0'); // HI or LS
when '101' result = (PSTATE.N == PSTATE.V); // GE or LT
when '110' result = (PSTATE.N == PSTATE.V && PSTATE.Z == '0'); // GT or LE
when '111' result = TRUE; // AL
// Condition flag values in the set '111x' indicate always true
// Otherwise, invert condition if necessary.
if cond<0> == '1' && cond != '1111' then
result = !result;
return result;
ConsumptionOfSpeculativeDataBarrier();
// Clear a pending virtual SError interrupt.
ClearPendingVirtualSError();

// CurrentInstrSet()
// =================
InstrSet CurrentInstrSet()
    if UsingAArch32() then
        result = if PSTATE.T == '0' then InstrSet_A32 else InstrSet_T32;
        // PSTATE.J is RES0. Implementation of T32EE or Jazelle state not permitted.
    else
        result = InstrSet_A64;
    return result;
// ConditionHolds()
// ================
// Return TRUE iff COND currently holds
boolean ConditionHolds(bits(4) cond)
    // Evaluate base condition.
    case cond<3:1> of
        when '000' result = (PSTATE.Z == '1');                          // EQ or NE
        when '001' result = (PSTATE.C == '1');                          // CS or CC
        when '010' result = (PSTATE.N == '1');                          // MI or PL
        when '011' result = (PSTATE.V == '1');                          // VS or VC
        when '100' result = (PSTATE.C == '1' && PSTATE.Z == '0');       // HI or LS
        when '101' result = (PSTATE.N == PSTATE.V);                     // GE or LT
        when '110' result = (PSTATE.N == PSTATE.V && PSTATE.Z == '0');  // GT or LE
        when '111' result = TRUE;                                       // AL

    // Condition flag values in the set '111x' indicate always true
    // Otherwise, invert condition if necessary.
    if cond<0> == '1' && cond != '1111' then
        result = !result;
    return result;

// CurrentPL()
// ===========
PrivilegeLevel CurrentPL()
    return PLOfEL(PSTATE.EL);
// Exception level encodings.
constant bits(2) EL3 = '11';
constant bits(2) EL2 = '10';
constant bits(2) EL1 = '01';
constant bits(2) EL0 = '00';
// CurrentInstrSet()
// =================
// EL2Enabled()
// ============
// Returns TRUE if EL2 is present and executing
// - with SCR_EL3.NS==1 when Non-secure EL2 is implemented, or
// - with SCR_EL3.NS==0 when Secure EL2 is implemented and enabled, or
// - when EL3 is not implemented.
boolean EL2Enabled()
    return HaveEL(EL2) && (!HaveEL(EL3) || SCR_EL3.NS == '1' || IsSecureEL2Enabled());
// CurrentPL()
// ELFromM32()
// ===========
(boolean,bits(2)) ELFromM32(bits(5) mode)
    // Convert an AArch32 mode encoding to an Exception level.
    // Returns (valid,EL):
    //   'valid' is TRUE if 'mode<4:0>' encodes a mode that is both valid for this implementation
    //           and the current value of SCR.NS/SCR_EL3.NS.
    //   'EL'    is the Exception level decoded from 'mode'.
    bits(2) el;
    boolean valid = !BadMode(mode);  // Check for modes that are not valid for this implementation

    case mode of
        when M32_Monitor
            el = EL3;
        when M32_Hyp
            el = EL2;
            valid = valid && (!HaveEL(EL3) || SCR_GEN[].NS == '1');
        when M32_FIQ, M32_IRQ, M32_Svc, M32_Abort, M32_Undef, M32_System
            // If EL3 is implemented and using AArch32, then these modes are EL3 modes in Secure
            // state, and EL1 modes in Non-secure state. If EL3 is not implemented or is using
            // AArch64, then these modes are EL1 modes.
            el = (if HaveEL(EL3) && HighestELUsingAArch32() && SCR.NS == '0' then EL3 else EL1);
        when M32_User
            el = EL0;
        otherwise
            valid = FALSE;           // Passed an illegal mode value

    if !valid then el = bits(2) UNKNOWN;
    return (valid, el);
constant bits(2)// ELFromSPSR()
// ============
// Convert an SPSR value encoding to an Exception level.
// Returns (valid,EL):
// 'valid' is TRUE if 'spsr<4:0>' encodes a valid mode for the current state.
// 'EL' is the Exception level decoded from 'spsr'.
(boolean,bits(2)) EL3 = '11';
constant bits(2)ELFromSPSR(bits(N) spsr)
if spsr<4> == '0' then // AArch64 state
el = spsr<3:2>;
if EL2 = '10';
constant bits(2)() then // No AArch64 support
valid = FALSE;
elsif ! EL1 = '01';
constant bits(2)(el) then // Exception level not implemented
valid = FALSE;
elsif spsr<1> == '1' then // M[1] must be 0
valid = FALSE;
elsif el == && spsr<0> == '1' then // for EL0, M[0] must be 0
valid = FALSE;
elsif el == EL2 && HaveEL(EL3) && !IsSecureEL2Enabled() && SCR_EL3.NS == '0' then
valid = FALSE; // Unless Secure EL2 is enabled, EL2 only valid in Non-secure state
else
valid = TRUE;
elsif HaveAnyAArch32() then // AArch32 state
(valid, el) = ELFromM32EL0 = '00';(spsr<4:0>);
else
valid = FALSE;
if !valid then el = bits(2) UNKNOWN;
return (valid,el);
// EL2Enabled()
// ELIsInHost()
// ============
boolean ELIsInHost(bits(2) el)
    if !HaveVirtHostExt() || ELUsingAArch32(EL2) then
        return FALSE;
    case el of
        when EL3
            return FALSE;
        when EL2
            return HCR_EL2.E2H == '1';
        when EL1
            return FALSE;
        when EL0
            return EL2Enabled() && HCR_EL2.<E2H,TGE> == '11';
        otherwise
            Unreachable();
// ELFromM32()
// ===========
// ELStateUsingAArch32()
// =====================
boolean ELStateUsingAArch32(bits(2) el, boolean secure)
    // See ELStateUsingAArch32K() for description. Must only be called in circumstances where
    // result is valid (typically, that means 'el IN {EL1,EL2,EL3}').
    (known, aarch32) = ELStateUsingAArch32K(el, secure);
    assert known;
    return aarch32;
// ELFromSPSR()
// ============
// ELStateUsingAArch32K()
// ======================
(boolean,boolean) ELStateUsingAArch32K(bits(2) el, boolean secure)
    // Returns (known, aarch32):
    //   'known'   is FALSE for EL0 if the current Exception level is not EL0 and EL1 is
    //             using AArch64, since it cannot determine the state of EL0; TRUE otherwise.
    //   'aarch32' is TRUE if the specified Exception level is using AArch32; FALSE otherwise.
    if !HaveAArch32EL(el) then
        return (TRUE, FALSE);   // Exception level is using AArch64
    elsif secure && el == EL2 then
        return (TRUE, FALSE);   // Secure EL2 is using AArch64
    elsif HighestELUsingAArch32() then
        return (TRUE, TRUE);    // Highest Exception level, and therefore all levels are using AArch32
    elsif el == HighestEL() then
        return (TRUE, FALSE);   // This is highest Exception level, so is using AArch64

    // Remainder of function deals with the interprocessing cases when highest Exception level is using AArch64
    boolean aarch32 = boolean UNKNOWN;
    boolean known = TRUE;

    aarch32_below_el3 = HaveEL(EL3) && SCR_EL3.RW == '0' &&
                        (!secure || !HaveSecureEL2Ext() || SCR_EL3.EEL2 == '0');
    aarch32_at_el1 = (aarch32_below_el3 || (HaveEL(EL2) &&
                      ((HaveSecureEL2Ext() && SCR_EL3.EEL2 == '1') || !secure) && HCR_EL2.RW == '0' &&
                      !(HCR_EL2.E2H == '1' && HCR_EL2.TGE == '1' && HaveVirtHostExt())));

    if el == EL0 && !aarch32_at_el1 then          // Only know if EL0 using AArch32 from PSTATE
        if PSTATE.EL == EL0 then
            aarch32 = PSTATE.nRW == '1';          // EL0 controlled by PSTATE
        else
            known = FALSE;                        // EL0 state is UNKNOWN
    else
        aarch32 = (aarch32_below_el3 && el != EL3) || (aarch32_at_el1 && el IN {EL1,EL0});

    if !known then aarch32 = boolean UNKNOWN;
    return (known, aarch32);
// ELIsInHost()
// ============
// ELUsingAArch32()
// ================
boolean ELIsInHost(bits(2) el)
if !ELUsingAArch32(bits(2) el)
returnHaveVirtHostExtELStateUsingAArch32() ||(el, ELUsingAArch32IsSecureBelowEL3(EL2) then
return FALSE;
case el of
when EL3
return FALSE;
when EL2
return HCR_EL2.E2H == '1';
when EL1
return FALSE;
when EL0
return EL2Enabled() && HCR_EL2.<E2H,TGE> == '11';
otherwise
Unreachable();());
// ELStateUsingAArch32()
// =====================
// ELUsingAArch32K()
// =================
boolean(boolean,boolean) ELStateUsingAArch32(bits(2) el, boolean secure)
// See ELStateUsingAArch32K() for description. Must only be called in circumstances where
// result is valid (typically, that means 'el IN {EL1,EL2,EL3}').
(known, aarch32) =ELUsingAArch32K(bits(2) el)
return ELStateUsingAArch32K(el, secure);
assert known;
return aarch32;(el,IsSecureBelowEL3());
// ELStateUsingAArch32K()
// ======================
// Determine whether Exception level 'el' (with Security state 'secure') is
// using AArch32, where that is knowable without being at 'el'.
// Returns (known, aarch32):
//   'known'   is FALSE for EL0 if the current Exception level is not EL0 and EL1 is
//             using AArch64, since it cannot determine the state of EL0; TRUE otherwise.
//   'aarch32' is TRUE if the specified Exception level is using AArch32; FALSE otherwise.

(boolean,boolean) ELStateUsingAArch32K(bits(2) el, boolean secure)
    if !HaveAArch32EL(el) then
        return (TRUE, FALSE);           // Exception level is using AArch64
    elsif secure && el == EL2 then
        return (TRUE, FALSE);           // Secure EL2 is using AArch64
    elsif HighestELUsingAArch32() then
        return (TRUE, TRUE);            // Highest Exception level, and therefore all levels are using AArch32
    elsif el == HighestEL() then
        return (TRUE, FALSE);           // This is highest Exception level, so is using AArch64

    // Remainder of function deals with the interprocessing cases when the highest
    // Exception level is using AArch64.
    boolean aarch32 = boolean UNKNOWN;
    boolean known = TRUE;

    // EL3 forces AArch32 below it when SCR_EL3.RW is 0 (and Secure EL2 is not enabled).
    aarch32_below_el3 = HaveEL(EL3) && SCR_EL3.RW == '0' &&
                        (!secure || !HaveSecureEL2Ext() || SCR_EL3.EEL2 == '0');
    // EL2 (when applicable) forces AArch32 at EL1 when HCR_EL2.RW is 0, unless
    // EL2 is the host (E2H+TGE) in which case there is no EL1.
    aarch32_at_el1 = (aarch32_below_el3 ||
                      (HaveEL(EL2) &&
                       ((HaveSecureEL2Ext() && SCR_EL3.EEL2 == '1') || !secure) &&
                       HCR_EL2.RW == '0' &&
                       !(HCR_EL2.E2H == '1' && HCR_EL2.TGE == '1' && HaveVirtHostExt())));

    if el == EL0 && !aarch32_at_el1 then        // Only know if EL0 using AArch32 from PSTATE
        if PSTATE.EL == EL0 then
            aarch32 = PSTATE.nRW == '1';        // EL0 controlled by PSTATE
        else
            known = FALSE;                      // EL0 state is UNKNOWN
    else
        aarch32 = (aarch32_below_el3 && el != EL3) || (aarch32_at_el1 && el IN {EL1,EL0});

    if !known then aarch32 = boolean UNKNOWN;
    return (known, aarch32);
// ELUsingAArch32()
// ================
boolean// PE enters a low-power state. ELUsingAArch32(bits(2) el)
returnEnterLowPowerState(); ELStateUsingAArch32(el, IsSecureBelowEL3());
// ELUsingAArch32K()
// =================
(boolean,boolean)bits(1) EventRegister; ELUsingAArch32K(bits(2) el)
return ELStateUsingAArch32K(el, IsSecureBelowEL3());
// Terminate processing of the current instruction.enumeration
EndOfInstruction();ExceptionalOccurrenceTargetState {AArch32_NonDebugState,
AArch64_NonDebugState,
DebugState
};
// PE enters a low-power state.// Returns TRUE if there is any pending physical FIQ
boolean
EnterLowPowerState();FIQPending();
bits(1) EventRegister;       // Per-PE Event Register, set by SendEvent/SendEventLocal

// GetPSRFromPSTATE()
// ==================
// Return a PSR value which represents the current PSTATE.
// 'targetELState' selects the layout: an AArch32-format PSR (N == 32) when the
// current state is AArch32 and the target is AArch32 non-debug or Debug state,
// otherwise an AArch64-format PSR (N == 64).

bits(N) GetPSRFromPSTATE(ExceptionalOccurrenceTargetState targetELState)
    if UsingAArch32() && (targetELState IN {AArch32_NonDebugState, DebugState}) then
        assert N == 32;
    else
        assert N == 64;
    bits(N) spsr = Zeros();
    spsr<31:28> = PSTATE.<N,Z,C,V>;
    if HavePANExt() then spsr<22> = PSTATE.PAN;
    spsr<20> = PSTATE.IL;
    if PSTATE.nRW == '1' then                     // AArch32 state
        spsr<27> = PSTATE.Q;
        spsr<26:25> = PSTATE.IT<1:0>;
        if HaveSSBSExt() then spsr<23> = PSTATE.SSBS;
        if HaveDITExt() then
            if targetELState == AArch32_NonDebugState then
                spsr<21> = PSTATE.DIT;
            else                                  // AArch64_NonDebugState or DebugState
                spsr<24> = PSTATE.DIT;
        if targetELState IN {AArch64_NonDebugState, DebugState} then
            spsr<21> = PSTATE.SS;
        spsr<19:16> = PSTATE.GE;
        spsr<15:10> = PSTATE.IT<7:2>;
        spsr<9> = PSTATE.E;
        spsr<8:6> = PSTATE.<A,I,F>;               // No PSTATE.D in AArch32 state
        spsr<5> = PSTATE.T;
        assert PSTATE.M<4> == PSTATE.nRW;         // bit [4] is the discriminator
        spsr<4:0> = PSTATE.M;
    else                                          // AArch64 state
        if HaveMTEExt() then spsr<25> = PSTATE.TCO;
        if HaveDITExt() then spsr<24> = PSTATE.DIT;
        if HaveUAOExt() then spsr<23> = PSTATE.UAO;
        spsr<21> = PSTATE.SS;
        if HaveSSBSExt() then spsr<12> = PSTATE.SSBS;
        if HaveBTIExt() then spsr<11:10> = PSTATE.BTYPE;
        spsr<9:6> = PSTATE.<D,A,I,F>;
        spsr<4> = PSTATE.nRW;
        spsr<3:2> = PSTATE.EL;
        spsr<0> = PSTATE.SP;
    return spsr;
enumeration// HasArchVersion()
// ================
// Returns TRUE if the implemented architecture includes the extensions defined in the specified
// architecture version.
boolean ExceptionalOccurrenceTargetState {HasArchVersion(
AArch32_NonDebugState,version)
return version ==
AArch64_NonDebugState,
DebugState
};|| boolean IMPLEMENTATION_DEFINED;
// Returns TRUE if there is any pending physical FIQ.
boolean FIQPending();

// HaveAArch32EL()
// ===============
// Return TRUE if Exception level 'el' supports AArch32 in this implementation.

boolean HaveAArch32EL(bits(2) el)
    if !HaveEL(el) then
        return FALSE;                   // The Exception level is not implemented
    elsif !HaveAnyAArch32() then
        return FALSE;                   // No Exception level can use AArch32
    elsif HighestELUsingAArch32() then
        return TRUE;                    // All Exception levels are using AArch32
    elsif el == HighestEL() then
        return FALSE;                   // The highest Exception level is using AArch64
    elsif el == EL0 then
        return TRUE;                    // EL0 must support using AArch32 if any AArch32
    return boolean IMPLEMENTATION_DEFINED;
// GetPSRFromPSTATE()
// ==================
// Return a PSR value which represents the current PSTATE
// HaveAnyAArch32()
// ================
// Return TRUE if AArch32 state is supported at any Exception level
bits(N)boolean GetPSRFromPSTATE(HaveAnyAArch32()
return boolean IMPLEMENTATION_DEFINED;ExceptionalOccurrenceTargetState targetELState)
if UsingAArch32() && (targetELState IN {AArch32_NonDebugState, DebugState}) then
assert N == 32;
else
assert N == 64;
bits(N) spsr = Zeros();
spsr<31:28> = PSTATE.<N,Z,C,V>;
if HavePANExt() then spsr<22> = PSTATE.PAN;
spsr<20> = PSTATE.IL;
if PSTATE.nRW == '1' then // AArch32 state
spsr<27> = PSTATE.Q;
spsr<26:25> = PSTATE.IT<1:0>;
if HaveSSBSExt() then spsr<23> = PSTATE.SSBS;
if HaveDITExt() then
if targetELState == AArch32_NonDebugState then
spsr<21> = PSTATE.DIT;
else //AArch64_NonDebugState or DebugState
spsr<24> = PSTATE.DIT;
if targetELState IN {AArch64_NonDebugState, DebugState} then
spsr<21> = PSTATE.SS;
spsr<19:16> = PSTATE.GE;
spsr<15:10> = PSTATE.IT<7:2>;
spsr<9> = PSTATE.E;
spsr<8:6> = PSTATE.<A,I,F>; // No PSTATE.D in AArch32 state
spsr<5> = PSTATE.T;
assert PSTATE.M<4> == PSTATE.nRW; // bit [4] is the discriminator
spsr<4:0> = PSTATE.M;
else // AArch64 state
if HaveMTEExt() then spsr<25> = PSTATE.TCO;
if HaveDITExt() then spsr<24> = PSTATE.DIT;
if HaveUAOExt() then spsr<23> = PSTATE.UAO;
spsr<21> = PSTATE.SS;
if HaveSSBSExt() then spsr<12> = PSTATE.SSBS;
if HaveBTIExt() then spsr<11:10> = PSTATE.BTYPE;
spsr<9:6> = PSTATE.<D,A,I,F>;
spsr<4> = PSTATE.nRW;
spsr<3:2> = PSTATE.EL;
spsr<0> = PSTATE.SP;
return spsr;
// HasArchVersion()
// HaveAnyAArch64()
// ================
// Returns TRUE if the implemented architecture includes the extensions defined in the specified
// architecture version.
// Return TRUE if AArch64 state is supported at any Exception level
boolean HasArchVersion(HaveAnyAArch64()
return !ArchVersionHighestELUsingAArch32 version)
return version == ARMv8p0 || boolean IMPLEMENTATION_DEFINED;();
// HaveAArch32EL()
// ===============
// HaveEL()
// ========
// Return TRUE if Exception level 'el' is supported
boolean HaveAArch32EL(bits(2) el)
// Return TRUE if Exception level 'el' supports AArch32 in this implementation
if !HaveEL(bits(2) el)
if el IN {HaveELEL1(el) then
return FALSE; // The Exception level is not implemented
elsif !HaveAnyAArch32() then
return FALSE; // No Exception level can use AArch32
elsif HighestELUsingAArch32() then
return TRUE; // All Exception levels are using AArch32
elsif el == HighestEL() then
return FALSE; // The highest Exception level is using AArch64
elsif el ==, EL0 then
return TRUE; // EL0 must support using AArch32 if any AArch32
} then
return TRUE; // EL1 and EL0 must exist
return boolean IMPLEMENTATION_DEFINED;
// HaveAnyAArch32()
// ================
// Return TRUE if AArch32 state is supported at any Exception level.

boolean HaveAnyAArch32()
    return boolean IMPLEMENTATION_DEFINED;

// HaveELUsingSecurityState()
// ==========================
// Returns TRUE if Exception level 'el' with Security state 'secure' is supported,
// FALSE otherwise.

boolean HaveELUsingSecurityState(bits(2) el, boolean secure)
    case el of
        when EL3
            assert secure;              // EL3 only exists in Secure state
            return HaveEL(EL3);
        when EL2
            if secure then
                return HaveEL(EL2) && HaveSecureEL2Ext();
            else
                return HaveEL(EL2);
        otherwise
            // EL1/EL0: with EL3 both Security states exist; without EL3 the single
            // Security state of the implementation is IMPLEMENTATION DEFINED.
            return (HaveEL(EL3) ||
                    (secure == boolean IMPLEMENTATION_DEFINED "Secure-only implementation"));
// HaveAnyAArch64()
// ================
// Return TRUE if AArch64 state is supported at any Exception level
// HaveFP16Ext()
// =============
// Return TRUE if FP16 extension is supported
boolean HaveAnyAArch64()
return !HaveFP16Ext()
return boolean IMPLEMENTATION_DEFINED;HighestELUsingAArch32();
// HaveEL()
// ========
// Return TRUE if Exception level 'el' is supported
// HighestEL()
// ===========
// Returns the highest implemented Exception level.
booleanbits(2) HaveEL(bits(2) el)
if el IN {HighestEL()
ifHaveEL(EL3) then
return EL3;
elsif HaveEL(EL2) then
return EL2;
else
return EL1,EL0} then
return TRUE; // EL1 and EL0 must exist
return boolean IMPLEMENTATION_DEFINED;;
// HaveELUsingSecurityState()
// ==========================
// Returns TRUE if Exception level 'el' with Security state 'secure' is supported,
// FALSE otherwise.
// HighestELUsingAArch32()
// =======================
// Return TRUE if configured to boot into AArch32 operation
boolean HaveELUsingSecurityState(bits(2) el, boolean secure)
case el of
whenHighestELUsingAArch32()
if ! EL3HaveAnyAArch32
assert secure;
return HaveEL(EL3);
when EL2
if secure then
return HaveEL(EL2) && HaveSecureEL2Ext();
else
return HaveEL(EL2);
otherwise
return (HaveEL(EL3) ||
(secure == boolean IMPLEMENTATION_DEFINED "Secure-only implementation"));() then return FALSE;
return boolean IMPLEMENTATION_DEFINED; // e.g. CFG32SIGNAL == HIGH
// HaveFP16Ext()
// =============
// Return TRUE if FP16 extension is supported
boolean// Provides a hint to close any gathering occurring within the micro-architecture. HaveFP16Ext()
return boolean IMPLEMENTATION_DEFINED;Hint_DGH();
// HighestEL()
// ===========
// Returns the highest implemented Exception level.
bits(2)// Hint_WFE()
// ==========
// Provides a hint indicating that the PE can enter a low-power state
// and remain there until a wakeup event occurs or, for WFET, a local
// timeout event is generated when the virtual timer value equals or
// exceeds the supplied threshold value. HighestEL()
Hint_WFE(integer localtimeout)
if IsEventRegisterSet() then
ClearEventRegister();
else
trap = FALSE;
if PSTATE.EL == EL0 then
// Check for traps described by the OS which may be EL1 or EL2.
if HaveTWEDExt() then
sctlr = SCTLR[];
trap = sctlr.nTWE == '0';
target_el = EL1;
else
AArch64.CheckForWFxTrap(EL1, TRUE);
if !trap && PSTATE.EL IN {EL0, EL1} && EL2Enabled() && !IsInHost() then
// Check for traps described by the Hypervisor.
if HaveTWEDExt() then
trap = HCR_EL2.TWE == '1';
target_el = EL2;
else
AArch64.CheckForWFxTrap(EL2, TRUE);
if !trap && HaveEL(EL3) then
return) && PSTATE.EL != EL3;
elsifthen
// Check for traps described by the Secure Monitor.
if HaveELHaveTWEDExt(() then
trap = SCR_EL3.TWE == '1';
target_el =EL2EL3) then
return;
else EL2AArch64.CheckForWFxTrap;
else
return( , TRUE);
if trap && PSTATE.EL != EL3 then
(delay_enabled, delay) = WFETrapDelay(target_el); // (If trap delay is enabled, Delay amount)
if !WaitForEventUntilDelay(delay_enabled, delay) then
// Event did not arrive before delay expired
AArch64.WFxTrap(target_el, TRUE); // Trap WFE
else
WaitForEventEL1EL3;(localtimeout);
// HighestELUsingAArch32()
// =======================
// Return TRUE if configured to boot into AArch32 operation
boolean// Hint_WFI()
// ==========
// Provides a hint indicating that the PE can enter a low-power state and
// remain there until a wakeup event occurs or, for WFIT, a local timeout
// event is generated when the virtual timer value equals or exceeds the
// supplied threshold value. HighestELUsingAArch32()
Hint_WFI(integer localtimeout)
if !() then
if PSTATE.EL == EL0 then
// Check for traps described by the OS which may be EL1 or EL2.
AArch64.CheckForWFxTrap(EL1, FALSE);
if PSTATE.EL IN {EL0, EL1} && EL2Enabled() && !IsInHost() then
// Check for traps described by the Hypervisor.
AArch64.CheckForWFxTrap(EL2, FALSE);
if HaveEL(EL3) && PSTATE.EL != EL3 then
// Check for traps described by the Secure Monitor.
AArch64.CheckForWFxTrap(EL3, FALSE);
WaitForInterruptHaveAnyAArch32InterruptPending() then return FALSE;
return boolean IMPLEMENTATION_DEFINED; // e.g. CFG32SIGNAL == HIGH(localtimeout);
// Provides a hint to close any gathering occurring within the micro-architecture.// Provides a hint that the task performed by a thread is of low
// importance so that it could yield to improve overall performance.
Hint_DGH();Hint_Yield();
// Hint_WFE()
// ==========
// Provides a hint indicating that the PE can enter a low-power state
// and remain there until a wakeup event occurs or, for WFET, a local
// timeout event is generated when the virtual timer value equals or
// exceeds the supplied threshold value.// Returns TRUE if there is any pending physical IRQ
boolean
Hint_WFE(integer localtimeout,IRQPending(); WFxType wfxtype)
if IsEventRegisterSet() then
ClearEventRegister();
else
trap = FALSE;
if PSTATE.EL == EL0 then
// Check for traps described by the OS which may be EL1 or EL2.
if HaveTWEDExt() then
sctlr = SCTLR[];
trap = sctlr.nTWE == '0';
target_el = EL1;
else
AArch64.CheckForWFxTrap(EL1, wfxtype);
if !trap && PSTATE.EL IN {EL0, EL1} && EL2Enabled() && !IsInHost() then
// Check for traps described by the Hypervisor.
if HaveTWEDExt() then
trap = HCR_EL2.TWE == '1';
target_el = EL2;
else
AArch64.CheckForWFxTrap(EL2, wfxtype);
if !trap && HaveEL(EL3) && PSTATE.EL != EL3 then
// Check for traps described by the Secure Monitor.
if HaveTWEDExt() then
trap = SCR_EL3.TWE == '1';
target_el = EL3;
else
AArch64.CheckForWFxTrap(EL3, wfxtype);
if trap && PSTATE.EL != EL3 then
(delay_enabled, delay) = WFETrapDelay(target_el); // (If trap delay is enabled, Delay amount)
if !WaitForEventUntilDelay(delay_enabled, delay) then
// Event did not arrive before delay expired
AArch64.WFxTrap(wfxtype, target_el); // Trap WFE
else
WaitForEvent(localtimeout);
// Hint_WFI()
// ==========
// Provides a hint indicating that the PE can enter a low-power state and
// remain there until a wakeup event occurs or, for WFIT, a local timeout
// event is generated when the virtual timer value equals or exceeds the
// supplied threshold value.// IllegalExceptionReturn()
// ========================
boolean
Hint_WFI(integer localtimeout,IllegalExceptionReturn(bits(N) spsr)
// Check for illegal return:
// * To an unimplemented Exception level.
// * To EL2 in Secure state, when SecureEL2 is not enabled.
// * To EL0 using AArch64 state, with SPSR.M[0]==1.
// * To AArch64 state with SPSR.M[1]==1.
// * To AArch32 state with an illegal value of SPSR.M.
(valid, target) = WFxTypeELFromSPSR wfxtype)
if !(spsr);
if !valid then return TRUE;
// Check for return to higher Exception level
ifInterruptPendingUInt() then
if PSTATE.EL ==(target) > EL0UInt then
// Check for traps described by the OS.(PSTATE.EL) then return TRUE;
spsr_mode_is_aarch32 = (spsr<4> == '1');
// Check for illegal return:
// * To EL1, EL2 or EL3 with register width specified in the SPSR different from the
// Execution state used in the Exception level being returned to, as determined by
// the SCR_EL3.RW or HCR_EL2.RW bits, or as configured from reset.
// * To EL0 using AArch64 state when EL1 is using AArch32 state as determined by the
// SCR_EL3.RW or HCR_EL2.RW bits or as configured from reset.
// * To AArch64 state from AArch32 state (should be caught by above)
(known, target_el_is_aarch32) =
AArch64.CheckForWFxTrapELUsingAArch32K((target);
assert known || (target ==EL1, wfxtype);
if PSTATE.EL IN {EL0,&& ! ELUsingAArch32(EL1} &&));
if known && spsr_mode_is_aarch32 != target_el_is_aarch32 then return TRUE;
// Check for illegal return from AArch32 to AArch64
if EL2EnabledUsingAArch32() && !() && !spsr_mode_is_aarch32 then return TRUE;
// Check for illegal return to EL1 when HCR.TGE is set and when either of
// * SecureEL2 is enabled.
// * SecureEL2 is not enabled and EL1 is in Non-secure state.
ifIsInHostHaveEL() then
// Check for traps described by the Hypervisor.(
AArch64.CheckForWFxTrap(EL2, wfxtype);
if) && target == HaveELEL1(&& HCR_EL2.TGE == '1' then
if (!EL3IsSecureBelowEL3) && PSTATE.EL !=() || EL3IsSecureEL2Enabled then
// Check for traps described by the Secure Monitor.
AArch64.CheckForWFxTrap(EL3, wfxtype);
WaitForInterrupt(localtimeout);()) then return TRUE;
return FALSE;
// Provides a hint that the task performed by a thread is of low
// importance so that it could yield to improve overall performance.enumeration
Hint_Yield();InstrSet {InstrSet_A64, InstrSet_A32, InstrSet_T32};
// Returns TRUE if there is any pending physical IRQ
boolean IRQPending();InstructionSynchronizationBarrier();
// IllegalExceptionReturn()
// ========================
// InterruptPending()
// ==================
// Returns TRUE if there are any pending physical or virtual
// interrupts, and FALSE otherwise.
boolean IllegalExceptionReturn(bits(N) spsr)
// Check for illegal return:
// * To an unimplemented Exception level.
// * To EL2 in Secure state, when SecureEL2 is not enabled.
// * To EL0 using AArch64 state, with SPSR.M[0]==1.
// * To AArch64 state with SPSR.M[1]==1.
// * To AArch32 state with an illegal value of SPSR.M.
(valid, target) =InterruptPending()
bit vIRQstatus = (if ELFromSPSRVirtualIRQPending(spsr);
if !valid then return TRUE;
// Check for return to higher Exception level
if() then '1' else '0') OR HCR_EL2.VI;
bit vFIQstatus = (if UIntVirtualFIQPending(target) >() then '1' else '0') OR HCR_EL2.VF;
bits(3) v_interrupts = HCR_EL2.VSE : vIRQstatus : vFIQstatus;
pending_physical_interrupt = ( UIntIRQPending(PSTATE.EL) then return TRUE;
spsr_mode_is_aarch32 = (spsr<4> == '1');
// Check for illegal return:
// * To EL1, EL2 or EL3 with register width specified in the SPSR different from the
// Execution state used in the Exception level being returned to, as determined by
// the SCR_EL3.RW or HCR_EL2.RW bits, or as configured from reset.
// * To EL0 using AArch64 state when EL1 is using AArch32 state as determined by the
// SCR_EL3.RW or HCR_EL2.RW bits or as configured from reset.
// * To AArch64 state from AArch32 state (should be caught by above)
(known, target_el_is_aarch32) =() || ELUsingAArch32KFIQPending(target);
assert known || (target ==() || EL0IsPhysicalSErrorPending && !());
pending_virtual_interrupt = !ELUsingAArch32IsInHost(EL1));
if known && spsr_mode_is_aarch32 != target_el_is_aarch32 then return TRUE;
// Check for illegal return from AArch32 to AArch64
if UsingAArch32() && !spsr_mode_is_aarch32 then return TRUE;
// Check for illegal return to EL1 when HCR.TGE is set and when either of
// * SecureEL2 is enabled.
// * SecureEL2 is not enabled and EL1 is in Non-secure state.
if HaveEL(EL2) && target == EL1 && HCR_EL2.TGE == '1' then
if (!IsSecureBelowEL3() || IsSecureEL2Enabled()) then return TRUE;
return FALSE;() && ((v_interrupts AND
HCR_EL2.<AMO,IMO,FMO>) != '000');
return pending_physical_interrupt || pending_virtual_interrupt;
enumeration// IsEventRegisterSet()
// ====================
// Return TRUE if the Event Register of this PE is set, and FALSE otherwise.
boolean InstrSet {IsEventRegisterSet()
return EventRegister == '1';InstrSet_A64, InstrSet_A32, InstrSet_T32};
// IsHighestEL()
// =============
// Returns TRUE if given exception level is the highest exception level implemented
boolean IsHighestEL(bits(2) el)
return HighestELInstructionSynchronizationBarrier();() == el;
// InterruptPending()
// ==================
// Returns TRUE if there are any pending physical or virtual
// interrupts, and FALSE otherwise.
// IsInHost()
// ==========
boolean InterruptPending()
bit vIRQstatus = (ifIsInHost()
return VirtualIRQPendingELIsInHost() then '1' else '0') OR HCR_EL2.VI;
bit vFIQstatus = (if VirtualFIQPending() then '1' else '0') OR HCR_EL2.VF;
bits(3) v_interrupts = HCR_EL2.VSE : vIRQstatus : vFIQstatus;
pending_physical_interrupt = (IRQPending() || FIQPending() ||
IsPhysicalSErrorPending());
pending_virtual_interrupt = !IsInHost() && ((v_interrupts AND
HCR_EL2.<AMO,IMO,FMO>) != '000');
return pending_physical_interrupt || pending_virtual_interrupt;(PSTATE.EL);
// IsEventRegisterSet()
// ====================
// Return TRUE if the Event Register of this PE is set, and FALSE otherwise.
// Returns TRUE if a physical SError interrupt is pending.
boolean IsEventRegisterSet()
return EventRegister == '1';IsPhysicalSErrorPending();
// IsHighestEL()
// =============
// Returns TRUE if given exception level is the highest exception level implemented
// IsSErrorEdgeTriggered()
// =======================
// Returns TRUE if the physical SError interrupt is edge-triggered
// and FALSE otherwise, as decoded from the 24-bit SError syndrome.

boolean IsSErrorEdgeTriggered(bits(24) syndrome)
    if HaveRASExt() then
        if HaveDoubleFaultExt() then
            return TRUE;
        if UsingAArch32() && syndrome<11:10> != '00' then
            // AArch32 and not Uncontainable.
            return TRUE;
        if !UsingAArch32() && syndrome<23> == '0' && syndrome<5:0> != '000000' then
            // AArch64 and neither IMPLEMENTATION DEFINED syndrome nor Uncategorized.
            return TRUE;
    return boolean IMPLEMENTATION_DEFINED "Edge-triggered SError";
// IsInHost()
// IsSecure()
// ==========
// Returns TRUE if current Exception level is in Secure state.
boolean IsInHost()
returnIsSecure()
if (EL3) && !UsingAArch32() && PSTATE.EL == EL3 then
return TRUE;
elsif HaveEL(EL3) && UsingAArch32() && PSTATE.M == M32_Monitor then
return TRUE;
return IsSecureBelowEL3ELIsInHostHaveEL(PSTATE.EL);();
// Returns TRUE if a physical SError interrupt is pending.
boolean IsPhysicalSErrorPending();

// IsSecureBelowEL3()
// ==================
// Return TRUE if an Exception level below EL3 is in Secure state
// or would be following an exception return to that level.
//
// Differs from IsSecure in that it ignores the current EL or Mode
// in considering security state.
// That is, if at AArch64 EL3 or in AArch32 Monitor mode, whether an
// exception return would pass to Secure or Non-secure state.

boolean IsSecureBelowEL3()
    if HaveEL(EL3) then
        return SCR_GEN[].NS == '0';
    elsif HaveEL(EL2) && (!HaveSecureEL2Ext() || HighestELUsingAArch32()) then
        // If Secure EL2 is not an architecture option then we must be Non-secure.
        return FALSE;
    else
        // TRUE if processor is Secure or FALSE if Non-secure.
        return boolean IMPLEMENTATION_DEFINED "Secure-only implementation";
// IsSErrorEdgeTriggered()
// =======================
// Returns TRUE if the physical SError interrupt is edge-triggered
// and FALSE otherwise.
// IsSecureEL2Enabled()
// ====================
// Returns TRUE if Secure EL2 is enabled, FALSE otherwise.
boolean IsSErrorEdgeTriggered(bits(2) target_el, bits(25) syndrome)
IsSecureEL2Enabled()
if HaveRASExtHaveEL() then
if( HaveDoubleFaultExtEL2() then
return TRUE;
if) && HaveSecureEL2Ext() then
if HaveEL(EL3) then
if !ELUsingAArch32(EL3) && SCR_EL3.EEL2 == '1' then
return TRUE;
else
return FALSE;
else
return IsSecure(target_el) then
if syndrome<11:10> != '00' then
// AArch32 and not Uncontainable.
return TRUE;
else
if syndrome<24> == '0' && syndrome<5:0> != '000000' then
// AArch64 and neither IMPLEMENTATION DEFINED syndrome nor Uncategorized.
return TRUE;
return boolean IMPLEMENTATION_DEFINED "Edge-triggered SError";();
else
return FALSE;
// IsSecure()
// ==========
// Returns TRUE if current Exception level is in Secure state.

boolean IsSecure()
    if HaveEL(EL3) && !UsingAArch32() && PSTATE.EL == EL3 then
        return TRUE;                             // AArch64 EL3 is always Secure
    elsif HaveEL(EL3) && UsingAArch32() && PSTATE.M == M32_Monitor then
        return TRUE;                             // AArch32 Monitor mode is always Secure
    return IsSecureBelowEL3();
// IsSecureBelowEL3()
// ==================
// Return TRUE if an Exception level below EL3 is in Secure state
// or would be following an exception return to that level.
//
// Differs from IsSecure in that it ignores the current EL or Mode
// in considering security state.
// That is, if at AArch64 EL3 or in AArch32 Monitor mode, whether an
// exception return would pass to Secure or Non-secure state.
// Returns TRUE if a virtual SError interrupt is pending.
boolean IsSecureBelowEL3()
ifIsVirtualSErrorPending(); HaveEL(EL3) then
return SCR_GEN[].NS == '0';
elsif HaveEL(EL2) && (!HaveSecureEL2Ext() || HighestELUsingAArch32()) then
// If Secure EL2 is not an architecture option then we must be Non-secure.
return FALSE;
else
// TRUE if processor is Secure or FALSE if Non-secure.
return boolean IMPLEMENTATION_DEFINED "Secure-only implementation";
// IsSecureEL2Enabled()
// ====================
// Returns TRUE if Secure EL2 is enabled, FALSE otherwise.

boolean IsSecureEL2Enabled()
    if HaveEL(EL2) && HaveSecureEL2Ext() then
        if HaveEL(EL3) then
            // With EL3 present, Secure EL2 is enabled by SCR_EL3.EEL2 (AArch64 EL3 only).
            if !ELUsingAArch32(EL3) && SCR_EL3.EEL2 == '1' then
                return TRUE;
            else
                return FALSE;
        else
            // No EL3: the implementation's single Security state decides.
            return IsSecure();
    else
        return FALSE;

// Returns TRUE if a local timeout event is generated when the value of
// CNTVCT_EL0 equals or exceeds the threshold value for the first time.
// If the threshold value is less than zero a local timeout event will
// not be generated.
boolean LocalTimeoutEvent(integer localtimeout);
// Returns TRUE if a synchronizable physical SError interrupt is pending.
boolean IsSynchronizablePhysicalSErrorPending();

// AArch32 PSTATE.M mode encodings.
constant bits(5) M32_User    = '10000';
constant bits(5) M32_FIQ     = '10001';
constant bits(5) M32_IRQ     = '10010';
constant bits(5) M32_Svc     = '10011';
constant bits(5) M32_Monitor = '10110';
constant bits(5) M32_Abort   = '10111';
constant bits(5) M32_Hyp     = '11010';
constant bits(5) M32_Undef   = '11011';
constant bits(5) M32_System  = '11111';
// Returns TRUE if a virtual SError interrupt is pending.
boolean// PLOfEL()
// ========
PrivilegeLevel IsVirtualSErrorPending();PLOfEL(bits(2) el)
case el of
whenEL3 return if HighestELUsingAArch32() then PL1 else PL3;
when EL2 return PL2;
when EL1 return PL1;
when EL0 return PL0;
ProcState// Returns TRUE if a local timeout event is generated when the value of
// CNTVCT_EL0 equals or exceeds the threshold value for the first time.
// If the threshold value is less than zero a local timeout event will
// not be generated.
boolean LocalTimeoutEvent(integer localtimeout);PSTATE;
constant bits(5)enumeration M32_User = '10000';
constant bits(5)PrivilegeLevel { M32_FIQ = '10001';
constant bits(5)PL3, M32_IRQ = '10010';
constant bits(5)PL2, M32_Svc = '10011';
constant bits(5)PL1, M32_Monitor = '10110';
constant bits(5)PL0}; M32_Abort = '10111';
constant bits(5) M32_Hyp = '11010';
constant bits(5) M32_Undef = '11011';
constant bits(5) M32_System = '11111';
// PLOfEL()
// ========
// Map an Exception level to its Privilege level.

PrivilegeLevel PLOfEL(bits(2) el)
    case el of
        when EL3 return if HighestELUsingAArch32() then PL1 else PL3;
        when EL2 return PL2;
        when EL1 return PL1;
        when EL0 return PL0;

// Processor state bits held in PSTATE.
type ProcState is (
    bits (1) N,        // Negative condition flag
    bits (1) Z,        // Zero condition flag
    bits (1) C,        // Carry condition flag
    bits (1) V,        // oVerflow condition flag
    bits (1) D,        // Debug mask bit                     [AArch64 only]
    bits (1) A,        // SError interrupt mask bit
    bits (1) I,        // IRQ mask bit
    bits (1) F,        // FIQ mask bit
    bits (1) PAN,      // Privileged Access Never Bit        [v8.1]
    bits (1) UAO,      // User Access Override               [v8.2]
    bits (1) DIT,      // Data Independent Timing            [v8.4]
    bits (1) TCO,      // Tag Check Override                 [v8.5, AArch64 only]
    bits (2) BTYPE,    // Branch Type                        [v8.5]
    bits (1) SS,       // Software step bit
    bits (1) IL,       // Illegal Execution state bit
    bits (2) EL,       // Exception Level
    bits (1) nRW,      // not Register Width: 0=64, 1=32
    bits (1) SP,       // Stack pointer select: 0=SP0, 1=SPx [AArch64 only]
    bits (1) Q,        // Cumulative saturation flag         [AArch32 only]
    bits (4) GE,       // Greater than or Equal flags        [AArch32 only]
    bits (1) SSBS,     // Speculative Store Bypass Safe
    bits (8) IT,       // If-then bits, RES0 in CPSR         [AArch32 only]
    bits (1) J,        // J bit, RES0                        [AArch32 only, RES0 in SPSR and CPSR]
    bits (1) T,        // T32 bit, RES0 in CPSR              [AArch32 only]
    bits (1) E,        // Endianness bit                     [AArch32 only]
    bits (5) M         // Mode field                         [AArch32 only]
)
// RestoredITBits()
// ================
// Get the value of PSTATE.IT to be restored on this exception return.

bits(8) RestoredITBits(bits(N) spsr)
    it = spsr<15:10,26:25>;

    // When PSTATE.IL is set, it is CONSTRAINED UNPREDICTABLE whether the IT bits are each set
    // to zero or copied from the SPSR.
    if PSTATE.IL == '1' then
        if ConstrainUnpredictableBool(Unpredictable_ILZEROIT) then return '00000000';
        else return it;

    // The IT bits are forced to zero when they are set to a reserved value.
    if !IsZero(it<7:4>) && IsZero(it<3:0>) then
        return '00000000';

    // The IT bits are forced to zero when returning to A32 state, or when returning to an EL
    // with the ITD bit set to 1, and the IT bits are describing a multi-instruction block.
    itd = if PSTATE.EL == EL2 then HSCTLR.ITD else SCTLR.ITD;
    if (spsr<5> == '0' && !IsZero(it)) || (itd == '1' && !IsZero(it<2:0>)) then
        return '00000000';
    else
        return it;
enumeration PrivilegeLevel {PL3, PL2, PL1, PL0};

// SCRType is an opaque register type; its fields are defined by the SCR/SCR_EL3 descriptions.
type SCRType;

// SCR_GEN[]
// =========
// Returns a common view of the Secure Configuration Register:
// SCR (AArch32, zero-extended) or SCR_EL3 (AArch64), depending on the highest
// implemented Exception level's register width.

SCRType SCR_GEN[]
    // AArch32 secure & AArch64 EL3 registers are not architecturally mapped
    assert HaveEL(EL3);
    bits(64) r;
    if HighestELUsingAArch32() then
        r = ZeroExtend(SCR);
    else
        r = SCR_EL3;
    return r;
// RestoredITBits()
// ================
// Get the value of PSTATE.IT to be restored on this exception return.

bits(8) RestoredITBits(bits(N) spsr)
    it = spsr<15:10,26:25>;

    // When PSTATE.IL is set, it is CONSTRAINED UNPREDICTABLE whether the IT bits are each set
    // to zero or copied from the SPSR.
    if PSTATE.IL == '1' then
        if ConstrainUnpredictableBool(Unpredictable_ILZEROIT) then return '00000000';
        else return it;

    // The IT bits are forced to zero when they are set to a reserved value.
    if !IsZero(it<7:4>) && IsZero(it<3:0>) then
        return '00000000';

    // The IT bits are forced to zero when returning to A32 state, or when returning to an EL
    // with the ITD bit set to 1, and the IT bits are describing a multi-instruction block.
    itd = if PSTATE.EL == EL2 then HSCTLR.ITD else SCTLR.ITD;
    if (spsr<5> == '0' && !IsZero(it)) || (itd == '1' && !IsZero(it<2:0>)) then
        return '00000000';
    else
        return it;

// Signal an event to all PEs in a multiprocessor system to set their Event Registers.
// When a PE executes the SEV instruction, it causes this function to be executed.
SendEvent();

// SendEventLocal()
// ================
// Set the local Event Register of this PE.
// When a PE executes the SEVL instruction, it causes this function to be executed.

SendEventLocal()
    EventRegister = '1';
    return;
// SCR_GEN[]
// =========
// Returns a common view of the Secure Configuration Register:
// SCR (AArch32, zero-extended) or SCR_EL3 (AArch64).

SCRType SCR_GEN[]
    // AArch32 secure & AArch64 EL3 registers are not architecturally mapped
    assert HaveEL(EL3);
    bits(64) r;
    if HighestELUsingAArch32() then
        r = ZeroExtend(SCR);
    else
        r = SCR_EL3;
    return r;

// SetPSTATEFromPSR()
// ==================
// Set PSTATE based on a PSR value

SetPSTATEFromPSR(bits(N) spsr)
    boolean from_aarch64 = !UsingAArch32();
    assert N == (if from_aarch64 then 64 else 32);
    PSTATE.SS = DebugExceptionReturnSS(spsr);
    ShouldAdvanceSS = FALSE;
    if IllegalExceptionReturn(spsr) then
        PSTATE.IL = '1';
        if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
        if HaveBTIExt() then PSTATE.BTYPE = bits(2) UNKNOWN;
        if HaveUAOExt() then PSTATE.UAO = bit UNKNOWN;
        if HaveDITExt() then PSTATE.DIT = bit UNKNOWN;
        if HaveMTEExt() then PSTATE.TCO = bit UNKNOWN;
    else
        // State that is reinstated only on a legal exception return
        PSTATE.IL = spsr<20>;
        if spsr<4> == '1' then                  // AArch32 state
            AArch32.WriteMode(spsr<4:0>);       // Sets PSTATE.EL correctly
            if HaveSSBSExt() then PSTATE.SSBS = spsr<23>;
        else                                    // AArch64 state
            PSTATE.nRW = '0';
            PSTATE.EL = spsr<3:2>;
            PSTATE.SP = spsr<0>;
            if HaveBTIExt() then PSTATE.BTYPE = spsr<11:10>;
            if HaveSSBSExt() then PSTATE.SSBS = spsr<12>;
            if HaveUAOExt() then PSTATE.UAO = spsr<23>;
            if HaveDITExt() then PSTATE.DIT = spsr<24>;
            if HaveMTEExt() then PSTATE.TCO = spsr<25>;
    // If PSTATE.IL is set and returning to AArch32 state, it is CONSTRAINED UNPREDICTABLE whether
    // the T bit is set to zero or copied from SPSR.
    if PSTATE.IL == '1' && PSTATE.nRW == '1' then
        if ConstrainUnpredictableBool(Unpredictable_ILZEROT) then spsr<5> = '0';
    // State that is reinstated regardless of illegal exception return
    PSTATE.<N,Z,C,V> = spsr<31:28>;
    if HavePANExt() then PSTATE.PAN = spsr<22>;
    if PSTATE.nRW == '1' then                   // AArch32 state
        PSTATE.Q = spsr<27>;
        PSTATE.IT = RestoredITBits(spsr);
        ShouldAdvanceIT = FALSE;
        if HaveDITExt() then PSTATE.DIT = (if (Restarting() || from_aarch64) then spsr<24> else spsr<21>);
        PSTATE.GE = spsr<19:16>;
        PSTATE.E = spsr<9>;
        PSTATE.<A,I,F> = spsr<8:6>;             // No PSTATE.D in AArch32 state
        PSTATE.T = spsr<5>;                     // PSTATE.J is RES0
    else                                        // AArch64 state
        PSTATE.<D,A,I,F> = spsr<9:6>;           // No PSTATE.<Q,IT,GE,E,T> in AArch64 state
    return;
// Signal an event to all PEs in a multiprocessor system to set their Event Registers.
// When a PE executes the SEV instruction, it causes this function to be executed.
SendEvent();

// NOTE(review): cleared by SetPSTATEFromPSR; presumably gates advancing PSTATE.IT for the
// current instruction — confirm against the instruction-commit pseudocode.
boolean ShouldAdvanceIT;

// SendEventLocal()
// ================
// Set the local Event Register of this PE.
// When a PE executes the SEVL instruction, it causes this function to be executed.

SendEventLocal()
    EventRegister = '1';
    return;

// NOTE(review): cleared by SetPSTATEFromPSR; presumably gates advancing the software-step
// state for the current instruction — confirm against the debug pseudocode.
boolean ShouldAdvanceSS;
// SetPSTATEFromPSR()
// ==================
// Set PSTATE based on a PSR value

SetPSTATEFromPSR(bits(N) spsr)
    boolean from_aarch64 = !UsingAArch32();
    assert N == (if from_aarch64 then 64 else 32);
    PSTATE.SS = DebugExceptionReturnSS(spsr);
    ShouldAdvanceSS = FALSE;
    if IllegalExceptionReturn(spsr) then
        PSTATE.IL = '1';
        if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
        if HaveBTIExt() then PSTATE.BTYPE = bits(2) UNKNOWN;
        if HaveUAOExt() then PSTATE.UAO = bit UNKNOWN;
        if HaveDITExt() then PSTATE.DIT = bit UNKNOWN;
        if HaveMTEExt() then PSTATE.TCO = bit UNKNOWN;
    else
        // State that is reinstated only on a legal exception return
        PSTATE.IL = spsr<20>;
        if spsr<4> == '1' then                  // AArch32 state
            AArch32.WriteMode(spsr<4:0>);       // Sets PSTATE.EL correctly
            if HaveSSBSExt() then PSTATE.SSBS = spsr<23>;
        else                                    // AArch64 state
            PSTATE.nRW = '0';
            PSTATE.EL = spsr<3:2>;
            PSTATE.SP = spsr<0>;
            if HaveBTIExt() then PSTATE.BTYPE = spsr<11:10>;
            if HaveSSBSExt() then PSTATE.SSBS = spsr<12>;
            if HaveUAOExt() then PSTATE.UAO = spsr<23>;
            if HaveDITExt() then PSTATE.DIT = spsr<24>;
            if HaveMTEExt() then PSTATE.TCO = spsr<25>;
    // If PSTATE.IL is set and returning to AArch32 state, it is CONSTRAINED UNPREDICTABLE whether
    // the T bit is set to zero or copied from SPSR.
    if PSTATE.IL == '1' && PSTATE.nRW == '1' then
        if ConstrainUnpredictableBool(Unpredictable_ILZEROT) then spsr<5> = '0';
    // State that is reinstated regardless of illegal exception return
    PSTATE.<N,Z,C,V> = spsr<31:28>;
    if HavePANExt() then PSTATE.PAN = spsr<22>;
    if PSTATE.nRW == '1' then                   // AArch32 state
        PSTATE.Q = spsr<27>;
        PSTATE.IT = RestoredITBits(spsr);
        ShouldAdvanceIT = FALSE;
        if HaveDITExt() then PSTATE.DIT = (if (Restarting() || from_aarch64) then spsr<24> else spsr<21>);
        PSTATE.GE = spsr<19:16>;
        PSTATE.E = spsr<9>;
        PSTATE.<A,I,F> = spsr<8:6>;             // No PSTATE.D in AArch32 state
        PSTATE.T = spsr<5>;                     // PSTATE.J is RES0
    else                                        // AArch64 state
        PSTATE.<D,A,I,F> = spsr<9:6>;           // No PSTATE.<Q,IT,GE,E,T> in AArch64 state
    return;

SpeculationBarrier();
// NOTE(review): cleared by SetPSTATEFromPSR; presumably gates advancing the software-step
// state for the current instruction — confirm against the debug pseudocode.
boolean ShouldAdvanceSS;

SynchronizeContext();

// Implements the error synchronization event.
SynchronizeErrors();

// Take any pending unmasked physical SError interrupt
TakeUnmaskedPhysicalSErrorInterrupts(boolean iesb_req);

// Take any pending unmasked physical SError interrupt or unmasked virtual SError
// interrupt.
TakeUnmaskedSErrorInterrupts();

bits(32) ThisInstr();

integer ThisInstrLength();

Unreachable()
    assert FALSE;

// UsingAArch32()
// ==============
// Return TRUE if the current Exception level is using AArch32, FALSE if using AArch64.

boolean UsingAArch32()
    boolean aarch32 = (PSTATE.nRW == '1');
    if !HaveAnyAArch32() then assert !aarch32;
    if HighestELUsingAArch32() then assert aarch32;
    return aarch32;

// Returns TRUE if there is any pending virtual FIQ
boolean VirtualFIQPending();

// Returns TRUE if there is any pending virtual IRQ
boolean VirtualIRQPending();
// WaitForEvent()
// ==============
// PE suspends its operation and enters a low-power state if the
// Event Register is clear and, for WFET, there is no Local
// Timeout event when the WFET is executed.

WaitForEvent(integer localtimeout)
    if !(IsEventRegisterSet() || LocalTimeoutEvent(localtimeout)) then
        EnterLowPowerState();
    return;

// WaitForInterrupt()
// ==================
// PE suspends its operation to enter a low-power state until
// a WFI wake-up event occurs, the PE is reset, and, for WFIT,
// a Local Timeout Event is generated.

WaitForInterrupt(integer localtimeout)
    if localtimeout < 0 then
        EnterLowPowerState();
    else
        if !LocalTimeoutEvent(localtimeout) then
            EnterLowPowerState();
    return;
// ConstrainUnpredictable()
// ========================
// Return the appropriate Constraint result to control the caller's behavior. The return value
// is IMPLEMENTATION DEFINED within a permitted list for each UNPREDICTABLE case.
// (The permitted list is determined by an assert or case statement at the call site.)
// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// The extra argument is used here to allow this example definition. This is an example only and
// does not imply a fixed implementation of these behaviors. Indeed the intention is that it should
// be defined by each implementation, according to its implementation choices.

Constraint ConstrainUnpredictable(Unpredictable which)
    case which of
        when Unpredictable_VMSR
            return Constraint_UNDEF;
        when Unpredictable_WBOVERLAPLD
            return Constraint_WBSUPPRESS;       // return loaded value
        when Unpredictable_WBOVERLAPST
            return Constraint_NONE;             // store pre-writeback value
        when Unpredictable_LDPOVERLAP
            return Constraint_UNDEF;            // instruction is UNDEFINED
        when Unpredictable_BASEOVERLAP
            return Constraint_NONE;             // use original address
        when Unpredictable_DATAOVERLAP
            return Constraint_NONE;             // store original value
        when Unpredictable_DEVPAGE2
            return Constraint_FAULT;            // take an alignment fault
        when Unpredictable_DEVICETAGSTORE
            return Constraint_NONE;             // Do not take a fault
        when Unpredictable_INSTRDEVICE
            return Constraint_NONE;             // Do not take a fault
        when Unpredictable_RESCPACR
            return Constraint_TRUE;             // Map to UNKNOWN value
        when Unpredictable_RESMAIR
            return Constraint_UNKNOWN;          // Map to UNKNOWN value
        when Unpredictable_RESTEXCB
            return Constraint_UNKNOWN;          // Map to UNKNOWN value
        when Unpredictable_RESDACR
            return Constraint_UNKNOWN;          // Map to UNKNOWN value
        when Unpredictable_RESPRRR
            return Constraint_UNKNOWN;          // Map to UNKNOWN value
        when Unpredictable_RESVTCRS
            return Constraint_UNKNOWN;          // Map to UNKNOWN value
        when Unpredictable_RESTnSZ
            return Constraint_FORCE;            // Map to the limit value
        when Unpredictable_OORTnSZ
            return Constraint_FORCE;            // Map to the limit value
        when Unpredictable_LARGEIPA
            return Constraint_FORCE;            // Restrict the inputsize to the PAMax value
        when Unpredictable_ESRCONDPASS
            return Constraint_FALSE;            // Report as "AL"
        when Unpredictable_ILZEROIT
            return Constraint_FALSE;            // Do not zero PSTATE.IT
        when Unpredictable_ILZEROT
            return Constraint_FALSE;            // Do not zero PSTATE.T
        when Unpredictable_BPVECTORCATCHPRI
            return Constraint_TRUE;             // Debug Vector Catch: match on 2nd halfword
        when Unpredictable_VCMATCHHALF
            return Constraint_FALSE;            // No match
        when Unpredictable_VCMATCHDAPA
            return Constraint_FALSE;            // No match on Data Abort or Prefetch abort
        when Unpredictable_WPMASKANDBAS
            return Constraint_FALSE;            // Watchpoint disabled
        when Unpredictable_WPBASCONTIGUOUS
            return Constraint_FALSE;            // Watchpoint disabled
        when Unpredictable_RESWPMASK
            return Constraint_DISABLED;         // Watchpoint disabled
        when Unpredictable_WPMASKEDBITS
            return Constraint_FALSE;            // Watchpoint disabled
        when Unpredictable_RESBPWPCTRL
            return Constraint_DISABLED;         // Breakpoint/watchpoint disabled
        when Unpredictable_BPNOTIMPL
            return Constraint_DISABLED;         // Breakpoint disabled
        when Unpredictable_RESBPTYPE
            return Constraint_DISABLED;         // Breakpoint disabled
        when Unpredictable_BPNOTCTXCMP
            return Constraint_DISABLED;         // Breakpoint disabled
        when Unpredictable_BPMATCHHALF
            return Constraint_FALSE;            // No match
        when Unpredictable_BPMISMATCHHALF
            return Constraint_FALSE;            // No match
        when Unpredictable_RESTARTALIGNPC
            return Constraint_FALSE;            // Do not force alignment
        when Unpredictable_RESTARTZEROUPPERPC
            return Constraint_TRUE;             // Force zero extension
        when Unpredictable_ZEROUPPER
            return Constraint_TRUE;             // zero top halves of X registers
        when Unpredictable_ERETZEROUPPERPC
            return Constraint_TRUE;             // zero top half of PC
        when Unpredictable_A32FORCEALIGNPC
            return Constraint_FALSE;            // Do not force alignment
        when Unpredictable_SMD
            return Constraint_UNDEF;            // disabled SMC is Unallocated
        when Unpredictable_NONFAULT
            return Constraint_FALSE;            // Speculation enabled
        when Unpredictable_SVEZEROUPPER
            return Constraint_TRUE;             // zero top bits of Z registers
        when Unpredictable_SVELDNFDATA
            return Constraint_TRUE;             // Load mem data in NF loads
        when Unpredictable_SVELDNFZERO
            return Constraint_TRUE;             // Write zeros in NF loads
        when Unpredictable_CHECKSPNONEACTIVE
            return Constraint_TRUE;             // Check SP alignment
        when Unpredictable_AFUPDATE           // AF update for alignment or permission fault
            return Constraint_TRUE;
        when Unpredictable_IESBinDebug        // Use SCTLR[].IESB in Debug state
            return Constraint_TRUE;
        when Unpredictable_BADPMSFCR          // Bad settings for PMSFCR_EL1/PMSEVFR_EL1/PMSLATFR_EL1
            return Constraint_TRUE;
        when Unpredictable_ZEROBTYPE
            return Constraint_TRUE;             // Save BTYPE in SPSR_ELx/DPSR_EL0 as '00'
        when Unpredictable_CLEARERRITEZERO    // Clearing sticky errors when instruction in flight
            return Constraint_FALSE;
        when Unpredictable_ALUEXCEPTIONRETURN
            return Constraint_UNDEF;
        when Unpredicatable_DBGxVR_RESS
            return Constraint_FALSE;
        when Unpredictable_WFxTDEBUG
            return Constraint_FALSE;            // WFxT in Debug state does not execute as a NOP
        when Unpredictable_LS64UNSUPPORTED
            return Constraint_LIMITED_ATOMICITY; // Accesses are not single-copy atomic above the byte level
enumeration WFxType {WFxType_WFE, WFxType_WFI, WFxType_WFET, WFxType_WFIT};

// ConstrainUnpredictableBits()
// ============================
// This is a variant of ConstrainUnpredictable for when the result can be Constraint_UNKNOWN.
// If the result is Constraint_UNKNOWN then the function also returns UNKNOWN value, but that
// value is always an allocated value; that is, one for which the behavior is not itself
// CONSTRAINED.
// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// See the NOTE on ConstrainUnpredictable() for more information.
// This is an example placeholder only and does not imply a fixed implementation of the bits part
// of the result, and may not be applicable in all cases.

(Constraint,bits(width)) ConstrainUnpredictableBits(Unpredictable which)
    c = ConstrainUnpredictable(which);
    if c == Constraint_UNKNOWN then
        return (c, Zeros(width));           // See notes; this is an example implementation only
    else
        return (c, bits(width) UNKNOWN);    // bits result not used
// WaitForEvent()
// ==============
// PE suspends its operation and enters a low-power state if the
// Event Register is clear and, for WFET, there is no Local
// Timeout event when the WFET is executed.

WaitForEvent(integer localtimeout)
    if !(IsEventRegisterSet() || LocalTimeoutEvent(localtimeout)) then
        EnterLowPowerState();
    return;

// ConstrainUnpredictableBool()
// ============================
// This is a simple wrapper function for cases where the constrained result is either TRUE or FALSE.
// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// See the NOTE on ConstrainUnpredictable() for more information.

boolean ConstrainUnpredictableBool(Unpredictable which)
    c = ConstrainUnpredictable(which);
    assert c IN {Constraint_TRUE, Constraint_FALSE};
    return (c == Constraint_TRUE);
// WaitForInterrupt()
// ==================
// PE suspends its operation to enter a low-power state until
// a WFI wake-up event occurs, the PE is reset, and, for WFIT,
// a Local Timeout Event is generated.

WaitForInterrupt(integer localtimeout)
    if localtimeout < 0 then
        EnterLowPowerState();
    else
        if !LocalTimeoutEvent(localtimeout) then
            EnterLowPowerState();
    return;

// ConstrainUnpredictableInteger()
// ===============================
// This is a variant of ConstrainUnpredictable for when the result can be Constraint_UNKNOWN. If
// the result is Constraint_UNKNOWN then the function also returns an UNKNOWN value in the range
// low to high, inclusive.
// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// See the NOTE on ConstrainUnpredictable() for more information.
// This is an example placeholder only and does not imply a fixed implementation of the integer part
// of the result.

(Constraint,integer) ConstrainUnpredictableInteger(integer low, integer high, Unpredictable which)
    c = ConstrainUnpredictable(which);
    if c == Constraint_UNKNOWN then
        return (c, low);                    // See notes; this is an example implementation only
    else
        return (c, integer UNKNOWN);        // integer result not used
enumeration Constraint {
    // General
    Constraint_NONE,              // Instruction executes with
                                  // no change or side-effect to its described behavior
    Constraint_UNKNOWN,           // Destination register has UNKNOWN value
    Constraint_UNDEF,             // Instruction is UNDEFINED
    Constraint_UNDEFEL0,          // Instruction is UNDEFINED at EL0 only
    Constraint_NOP,               // Instruction executes as NOP
    Constraint_TRUE,
    Constraint_FALSE,
    Constraint_DISABLED,
    Constraint_UNCOND,            // Instruction executes unconditionally
    Constraint_COND,              // Instruction executes conditionally
    Constraint_ADDITIONAL_DECODE, // Instruction executes with additional decode
    // Load-store
    Constraint_WBSUPPRESS,
    Constraint_FAULT,
    Constraint_LIMITED_ATOMICITY, // Accesses are not single-copy atomic above the byte level
    // IPA too large
    Constraint_FORCE, Constraint_FORCENOSLCHECK};

// ConstrainUnpredictable()
// ========================
// Return the appropriate Constraint result to control the caller's behavior. The return value
// is IMPLEMENTATION DEFINED within a permitted list for each UNPREDICTABLE case.
// (The permitted list is determined by an assert or case statement at the call site.)
// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// The extra argument is used here to allow this example definition. This is an example only and
// does not imply a fixed implementation of these behaviors. Indeed the intention is that it should
// be defined by each implementation, according to its implementation choices.

Constraint ConstrainUnpredictable(Unpredictable which)
    case which of
        when Unpredictable_VMSR
            return Constraint_UNDEF;
        when Unpredictable_WBOVERLAPLD
            return Constraint_WBSUPPRESS;       // return loaded value
        when Unpredictable_WBOVERLAPST
            return Constraint_NONE;             // store pre-writeback value
        when Unpredictable_LDPOVERLAP
            return Constraint_UNDEF;            // instruction is UNDEFINED
        when Unpredictable_BASEOVERLAP
            return Constraint_NONE;             // use original address
        when Unpredictable_DATAOVERLAP
            return Constraint_NONE;             // store original value
        when Unpredictable_DEVPAGE2
            return Constraint_FAULT;            // take an alignment fault
        when Unpredictable_DEVICETAGSTORE
            return Constraint_NONE;             // Do not take a fault
        when Unpredictable_INSTRDEVICE
            return Constraint_NONE;             // Do not take a fault
        when Unpredictable_RESCPACR
            return Constraint_TRUE;             // Map to UNKNOWN value
        when Unpredictable_RESMAIR
            return Constraint_UNKNOWN;          // Map to UNKNOWN value
        when Unpredictable_RESTEXCB
            return Constraint_UNKNOWN;          // Map to UNKNOWN value
        when Unpredictable_RESDACR
            return Constraint_UNKNOWN;          // Map to UNKNOWN value
        when Unpredictable_RESPRRR
            return Constraint_UNKNOWN;          // Map to UNKNOWN value
        when Unpredictable_RESVTCRS
            return Constraint_UNKNOWN;          // Map to UNKNOWN value
        when Unpredictable_RESTnSZ
            return Constraint_FORCE;            // Map to the limit value
        when Unpredictable_OORTnSZ
            return Constraint_FORCE;            // Map to the limit value
        when Unpredictable_LARGEIPA
            return Constraint_FORCE;            // Restrict the inputsize to the PAMax value
        when Unpredictable_ESRCONDPASS
            return Constraint_FALSE;            // Report as "AL"
        when Unpredictable_ILZEROIT
            return Constraint_FALSE;            // Do not zero PSTATE.IT
        when Unpredictable_ILZEROT
            return Constraint_FALSE;            // Do not zero PSTATE.T
        when Unpredictable_BPVECTORCATCHPRI
            return Constraint_TRUE;             // Debug Vector Catch: match on 2nd halfword
        when Unpredictable_VCMATCHHALF
            return Constraint_FALSE;            // No match
        when Unpredictable_VCMATCHDAPA
            return Constraint_FALSE;            // No match on Data Abort or Prefetch abort
        when Unpredictable_WPMASKANDBAS
            return Constraint_FALSE;            // Watchpoint disabled
        when Unpredictable_WPBASCONTIGUOUS
            return Constraint_FALSE;            // Watchpoint disabled
        when Unpredictable_RESWPMASK
            return Constraint_DISABLED;         // Watchpoint disabled
        when Unpredictable_WPMASKEDBITS
            return Constraint_FALSE;            // Watchpoint disabled
        when Unpredictable_RESBPWPCTRL
            return Constraint_DISABLED;         // Breakpoint/watchpoint disabled
        when Unpredictable_BPNOTIMPL
            return Constraint_DISABLED;         // Breakpoint disabled
        when Unpredictable_RESBPTYPE
            return Constraint_DISABLED;         // Breakpoint disabled
        when Unpredictable_BPNOTCTXCMP
            return Constraint_DISABLED;         // Breakpoint disabled
        when Unpredictable_BPMATCHHALF
            return Constraint_FALSE;            // No match
        when Unpredictable_BPMISMATCHHALF
            return Constraint_FALSE;            // No match
        when Unpredictable_RESTARTALIGNPC
            return Constraint_FALSE;            // Do not force alignment
        when Unpredictable_RESTARTZEROUPPERPC
            return Constraint_TRUE;             // Force zero extension
        when Unpredictable_ZEROUPPER
            return Constraint_TRUE;             // zero top halves of X registers
        when Unpredictable_ERETZEROUPPERPC
            return Constraint_TRUE;             // zero top half of PC
        when Unpredictable_A32FORCEALIGNPC
            return Constraint_FALSE;            // Do not force alignment
        when Unpredictable_SMD
            return Constraint_UNDEF;            // disabled SMC is Unallocated
        when Unpredictable_NONFAULT
            return Constraint_FALSE;            // Speculation enabled
        when Unpredictable_SVEZEROUPPER
            return Constraint_TRUE;             // zero top bits of Z registers
        when Unpredictable_SVELDNFDATA
            return Constraint_TRUE;             // Load mem data in NF loads
        when Unpredictable_SVELDNFZERO
            return Constraint_TRUE;             // Write zeros in NF loads
        when Unpredictable_CHECKSPNONEACTIVE
            return Constraint_TRUE;             // Check SP alignment
        when Unpredictable_AFUPDATE           // AF update for alignment or permission fault
            return Constraint_TRUE;
        when Unpredictable_IESBinDebug        // Use SCTLR[].IESB in Debug state
            return Constraint_TRUE;
        when Unpredictable_BADPMSFCR          // Bad settings for PMSFCR_EL1/PMSEVFR_EL1/PMSLATFR_EL1
            return Constraint_TRUE;
        when Unpredictable_ZEROBTYPE
            return Constraint_TRUE;             // Save BTYPE in SPSR_ELx/DPSR_EL0 as '00'
        when Unpredictable_CLEARERRITEZERO    // Clearing sticky errors when instruction in flight
            return Constraint_FALSE;
        when Unpredictable_ALUEXCEPTIONRETURN
            return Constraint_UNDEF;
        when Unpredictable_DBGxVR_RESS
            return Constraint_FALSE;
        when Unpredictable_PMSCR_PCT
            return Constraint_PMSCR_PCT_VIRT;
        when Unpredictable_WFxTDEBUG
            return Constraint_FALSE;            // WFxT in Debug state does not execute as a NOP
        when Unpredictable_LS64UNSUPPORTED
            return Constraint_LIMITED_ATOMICITY; // Accesses are not single-copy atomic above the byte level
        when Unpredictable_IGNORETRAPINDEBUG
            return Constraint_TRUE;             // Trap to register access in debug state is ignored
        when Unpredictable_PMUEVENTCOUNTER
            return Constraint_UNDEF;            // Accesses to the register are UNDEFINED
// ConstrainUnpredictableBits()
// ============================
// This is a variant of ConstrainUnpredictable for when the result can be Constraint_UNKNOWN.
// If the result is Constraint_UNKNOWN then the function also returns UNKNOWN value, but that
// value is always an allocated value; that is, one for which the behavior is not itself
// CONSTRAINED.
// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// See the NOTE on ConstrainUnpredictable() for more information.
// This is an example placeholder only and does not imply a fixed implementation of the bits part
// of the result, and may not be applicable in all cases.

(Constraint,bits(width)) ConstrainUnpredictableBits(Unpredictable which)
    c = ConstrainUnpredictable(which);
    if c == Constraint_UNKNOWN then
        return (c, Zeros(width));           // See notes; this is an example implementation only
    elsif c == Constraint_PMSCR_PCT_VIRT then
        return (c, Zeros(width));
    else
        return (c, bits(width) UNKNOWN);    // bits result not used

enumeration Unpredictable {
    // VMSR on MVFR
    Unpredictable_VMSR,
    // Writeback/transfer register overlap (load)
    Unpredictable_WBOVERLAPLD,
    // Writeback/transfer register overlap (store)
    Unpredictable_WBOVERLAPST,
    // Load Pair transfer register overlap
    Unpredictable_LDPOVERLAP,
    // Store-exclusive base/status register overlap
    Unpredictable_BASEOVERLAP,
    // Store-exclusive data/status register overlap
    Unpredictable_DATAOVERLAP,
    // Load-store alignment checks
    Unpredictable_DEVPAGE2,
    // Instruction fetch from Device memory
    Unpredictable_INSTRDEVICE,
    // Reserved CPACR value
    Unpredictable_RESCPACR,
    // Reserved MAIR value
    Unpredictable_RESMAIR,
    // Reserved TEX:C:B value
    Unpredictable_RESTEXCB,
    // Reserved PRRR value
    Unpredictable_RESPRRR,
    // Reserved DACR field
    Unpredictable_RESDACR,
    // Reserved VTCR.S value
    Unpredictable_RESVTCRS,
    // Reserved TCR.TnSZ value
    Unpredictable_RESTnSZ,
    // Reserved SCTLR_ELx.TCF value
    Unpredictable_RESTCF,
    // Tag stored to Device memory
    Unpredictable_DEVICETAGSTORE,
    // Out-of-range TCR.TnSZ value
    Unpredictable_OORTnSZ,
    // IPA size exceeds PA size
    Unpredictable_LARGEIPA,
    // Syndrome for a known-passing conditional A32 instruction
    Unpredictable_ESRCONDPASS,
    // Illegal State exception: zero PSTATE.IT
    Unpredictable_ILZEROIT,
    // Illegal State exception: zero PSTATE.T
    Unpredictable_ILZEROT,
    // Debug: prioritization of Vector Catch
    Unpredictable_BPVECTORCATCHPRI,
    // Debug Vector Catch: match on 2nd halfword
    Unpredictable_VCMATCHHALF,
    // Debug Vector Catch: match on Data Abort or Prefetch abort
    Unpredictable_VCMATCHDAPA,
    // Debug watchpoints: non-zero MASK and non-ones BAS
    Unpredictable_WPMASKANDBAS,
    // Debug watchpoints: non-contiguous BAS
    Unpredictable_WPBASCONTIGUOUS,
    // Debug watchpoints: reserved MASK
    Unpredictable_RESWPMASK,
    // Debug watchpoints: non-zero MASKed bits of address
    Unpredictable_WPMASKEDBITS,
    // Debug breakpoints and watchpoints: reserved control bits
    Unpredictable_RESBPWPCTRL,
    // Debug breakpoints: not implemented
    Unpredictable_BPNOTIMPL,
    // Debug breakpoints: reserved type
    Unpredictable_RESBPTYPE,
    // Debug breakpoints: not-context-aware breakpoint
    Unpredictable_BPNOTCTXCMP,
    // Debug breakpoints: match on 2nd halfword of instruction
    Unpredictable_BPMATCHHALF,
    // Debug breakpoints: mismatch on 2nd halfword of instruction
    Unpredictable_BPMISMATCHHALF,
    // Debug: restart to a misaligned AArch32 PC value
    Unpredictable_RESTARTALIGNPC,
    // Debug: restart to a not-zero-extended AArch32 PC value
    Unpredictable_RESTARTZEROUPPERPC,
    // Zero top 32 bits of X registers in AArch32 state
    Unpredictable_ZEROUPPER,
    // Zero top 32 bits of PC on illegal return to AArch32 state
    Unpredictable_ERETZEROUPPERPC,
    // Force address to be aligned when interworking branch to A32 state
    Unpredictable_A32FORCEALIGNPC,
    // SMC disabled
    Unpredictable_SMD,
    // FF speculation
    Unpredictable_NONFAULT,
    // Zero top bits of Z registers in EL change
    Unpredictable_SVEZEROUPPER,
    // Load mem data in NF loads
    Unpredictable_SVELDNFDATA,
    // Write zeros in NF loads
    Unpredictable_SVELDNFZERO,
    // SP alignment fault when predicate is all zero
    Unpredictable_CHECKSPNONEACTIVE,
    // Access Flag Update by HW
    Unpredictable_AFUPDATE,
    // Consider SCTLR[].IESB in Debug state
    Unpredictable_IESBinDebug,
    // Bad settings for PMSFCR_EL1/PMSEVFR_EL1/PMSLATFR_EL1
    Unpredictable_BADPMSFCR,
    // Zero saved BType value in SPSR_ELx/DPSR_EL0
    Unpredictable_ZEROBTYPE,
    // Timestamp constrained to virtual or physical
    Unpredictable_EL2TIMESTAMP,
    Unpredictable_EL1TIMESTAMP,
    // WFET or WFIT instruction in Debug state
    Unpredictable_WFxTDEBUG,
    // Address does not support LS64 instructions
    Unpredictable_LS64UNSUPPORTED,
    // Clearing DCC/ITR sticky flags when instruction is in flight
    Unpredictable_CLEARERRITEZERO,
    // ALUEXCEPTIONRETURN when in user/system mode in A32 instructions
    Unpredictable_ALUEXCEPTIONRETURN,
    // Compare DBGBVR.RESS for BP/WP
    Unpredicatable_DBGxVR_RESS};
// ConstrainUnpredictableBool()
// ============================
// This is a simple wrapper function for cases where the constrained result is either TRUE or FALSE.
// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// See the NOTE on ConstrainUnpredictable() for more information.

boolean ConstrainUnpredictableBool(Unpredictable which)
    c = ConstrainUnpredictable(which);
    assert c IN {Constraint_TRUE, Constraint_FALSE};
    return (c == Constraint_TRUE);

// AdvSIMDExpandImm()
// ==================
// Expand an Advanced SIMD modified-immediate (op:cmode:imm8) to its 64-bit value.

bits(64) AdvSIMDExpandImm(bit op, bits(4) cmode, bits(8) imm8)
    case cmode<3:1> of
        when '000'
            imm64 = Replicate(Zeros(24):imm8, 2);
        when '001'
            imm64 = Replicate(Zeros(16):imm8:Zeros(8), 2);
        when '010'
            imm64 = Replicate(Zeros(8):imm8:Zeros(16), 2);
        when '011'
            imm64 = Replicate(imm8:Zeros(24), 2);
        when '100'
            imm64 = Replicate(Zeros(8):imm8, 4);
        when '101'
            imm64 = Replicate(imm8:Zeros(8), 4);
        when '110'
            if cmode<0> == '0' then
                imm64 = Replicate(Zeros(16):imm8:Ones(8), 2);
            else
                imm64 = Replicate(Zeros(8):imm8:Ones(16), 2);
        when '111'
            if cmode<0> == '0' && op == '0' then
                imm64 = Replicate(imm8, 8);
            if cmode<0> == '0' && op == '1' then
                imm8a = Replicate(imm8<7>, 8); imm8b = Replicate(imm8<6>, 8);
                imm8c = Replicate(imm8<5>, 8); imm8d = Replicate(imm8<4>, 8);
                imm8e = Replicate(imm8<3>, 8); imm8f = Replicate(imm8<2>, 8);
                imm8g = Replicate(imm8<1>, 8); imm8h = Replicate(imm8<0>, 8);
                imm64 = imm8a:imm8b:imm8c:imm8d:imm8e:imm8f:imm8g:imm8h;
            if cmode<0> == '1' && op == '0' then
                imm32 = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19);
                imm64 = Replicate(imm32, 2);
            if cmode<0> == '1' && op == '1' then
                if UsingAArch32() then ReservedEncoding();
                imm64 = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,8):imm8<5:0>:Zeros(48);
    return imm64;
// ConstrainUnpredictableInteger()
// ===============================
// This is a variant of ConstrainUnpredictable for when the result can be Constraint_UNKNOWN. If
// the result is Constraint_UNKNOWN then the function also returns an UNKNOWN value in the range
// low to high, inclusive.
// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// See the NOTE on ConstrainUnpredictable() for more information.
// This is an example placeholder only and does not imply a fixed implementation of the integer part
// of the result.

(Constraint,integer) ConstrainUnpredictableInteger(integer low, integer high, Unpredictable which)
    c = ConstrainUnpredictable(which);
    if c == Constraint_UNKNOWN then
        return (c, low);                    // See notes; this is an example implementation only
    else
        return (c, integer UNKNOWN);        // integer result not used

// MatMulAdd()
// ===========
//
// Signed or unsigned 8-bit integer matrix multiply and add to 32-bit integer matrix
// result[2, 2] = addend[2, 2] + (op1[2, 8] * op2[8, 2])

bits(N) MatMulAdd(bits(N) addend, bits(N) op1, bits(N) op2, boolean op1_unsigned, boolean op2_unsigned)
    assert N == 128;
    bits(N) result;
    bits(32) sum;
    integer prod;
    for i = 0 to 1
        for j = 0 to 1
            sum = Elem[addend, 2*i + j, 32];
            for k = 0 to 7
                prod = Int(Elem[op1, 8*i + k, 8], op1_unsigned) * Int(Elem[op2, 8*j + k, 8], op2_unsigned);
                sum = sum + prod;
            Elem[result, 2*i + j, 32] = sum;
    return result;
// PolynomialMult()
// ================
// Polynomial (carry-less) multiplication over GF(2): each set bit of op1
// adds (XOR) a shifted copy of op2 into the result.
bits(M+N) PolynomialMult(bits(M) op1, bits(N) op2)
    result = Zeros(M+N);
    extended_op2 = ZeroExtend(op2, M+N);
    for i=0 to M-1
        if op1<i> == '1' then
            result = result EOR LSL(extended_op2, i);
    return result;
// SatQ()
// ======
// Saturate the integer i to an N-bit value, signed or unsigned as selected
// by 'unsigned'. Returns the saturated value and a flag indicating whether
// saturation occurred.
(bits(N), boolean) SatQ(integer i, integer N, boolean unsigned)
    (result, sat) = if unsigned then UnsignedSatQ(i, N) else SignedSatQ(i, N);
    return (result, sat);
// AdvSIMDExpandImm()
// ==================
// Expand the 8-bit immediate of an AdvSIMD modified-immediate instruction
// into the 64-bit immediate value, as selected by op and cmode.
bits(64) AdvSIMDExpandImm(bit op, bits(4) cmode, bits(8) imm8)
    case cmode<3:1> of
        when '000'
            imm64 = Replicate(Zeros(24):imm8, 2);
        when '001'
            imm64 = Replicate(Zeros(16):imm8:Zeros(8), 2);
        when '010'
            imm64 = Replicate(Zeros(8):imm8:Zeros(16), 2);
        when '011'
            imm64 = Replicate(imm8:Zeros(24), 2);
        when '100'
            imm64 = Replicate(Zeros(8):imm8, 4);
        when '101'
            imm64 = Replicate(imm8:Zeros(8), 4);
        when '110'
            if cmode<0> == '0' then
                imm64 = Replicate(Zeros(16):imm8:Ones(8), 2);
            else
                imm64 = Replicate(Zeros(8):imm8:Ones(16), 2);
        when '111'
            if cmode<0> == '0' && op == '0' then
                imm64 = Replicate(imm8, 8);
            if cmode<0> == '0' && op == '1' then
                // Each bit of imm8 is replicated into one byte of the result.
                imm8a = Replicate(imm8<7>, 8); imm8b = Replicate(imm8<6>, 8);
                imm8c = Replicate(imm8<5>, 8); imm8d = Replicate(imm8<4>, 8);
                imm8e = Replicate(imm8<3>, 8); imm8f = Replicate(imm8<2>, 8);
                imm8g = Replicate(imm8<1>, 8); imm8h = Replicate(imm8<0>, 8);
                imm64 = imm8a:imm8b:imm8c:imm8d:imm8e:imm8f:imm8g:imm8h;
            if cmode<0> == '1' && op == '0' then
                // 32-bit floating-point constant, replicated to both halves.
                imm32 = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19);
                imm64 = Replicate(imm32, 2);
            if cmode<0> == '1' && op == '1' then
                // 64-bit floating-point constant; not available in AArch32.
                if UsingAArch32() then ReservedEncoding();
                imm64 = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,8):imm8<5:0>:Zeros(48);
    return imm64;
// UnsignedRSqrtEstimate()
// =======================
// Unsigned reciprocal square root estimate (URSQRTE).
bits(N) UnsignedRSqrtEstimate(bits(N) operand)
    assert N == 32;
    if operand<N-1:N-2> == '00' then  // Operands <= 0x3FFFFFFF produce 0xFFFFFFFF
        result = Ones(N);
    else
        // input is in the range 0x40000000 .. 0xffffffff representing [0.25 .. 1.0)
        // estimate is in the range 256 .. 511 representing [1.0 .. 2.0)
        estimate = RecipSqrtEstimate(UInt(operand<31:23>), FALSE);
        // result is in the range 0x80000000 .. 0xff800000 representing [1.0 .. 2.0)
        result = estimate<8:0> : Zeros(N-9);
    return result;
// UnsignedRecipEstimate()
// =======================
// Unsigned reciprocal estimate (URECPE).
bits(N) UnsignedRecipEstimate(bits(N) operand)
    assert N == 32;
    if operand<N-1> == '0' then  // Operands <= 0x7FFFFFFF produce 0xFFFFFFFF
        result = Ones(N);
    else
        // input is in the range 0x80000000 .. 0xffffffff representing [0.5 .. 1.0)
        // estimate is in the range 256 to 511 representing [1.0 .. 2.0)
        estimate = RecipEstimate(UInt(operand<31:23>), FALSE);
        // result is in the range 0x80000000 .. 0xff800000 representing [1.0 .. 2.0)
        result = estimate<8:0> : Zeros(N-9);
    return result;
// UnsignedSatQ()
// ==============
// Saturate i into the unsigned N-bit range [0, 2^N - 1], reporting whether
// clamping was needed.
(bits(N), boolean) UnsignedSatQ(integer i, integer N)
    if i > 2^N - 1 then
        result = 2^N - 1; saturated = TRUE;
    elsif i < 0 then
        result = 0; saturated = TRUE;
    else
        result = i; saturated = FALSE;
    return (result<N-1:0>, saturated);
// SignedSatQ()
// ============
// Saturate i into the signed N-bit range [-2^(N-1), 2^(N-1) - 1], reporting
// whether clamping was needed.
(bits(N), boolean) SignedSatQ(integer i, integer N)
    if i > 2^(N-1) - 1 then
        result = 2^(N-1) - 1; saturated = TRUE;
    elsif i < -(2^(N-1)) then
        result = -(2^(N-1)); saturated = TRUE;
    else
        result = i; saturated = FALSE;
    return (result<N-1:0>, saturated);
// SelfHostedTraceEnabled()
// ========================
// Returns TRUE if Self-hosted Trace is enabled.
boolean SelfHostedTraceEnabled()
    if !HaveTraceExt() || !HaveSelfHostedTrace() then return FALSE;
    if HaveEL(EL3) then
        secure_trace_enable = (if ELUsingAArch32(EL3) then SDCR.STE else MDCR_EL3.STE);
        niden = (secure_trace_enable == '0' || ExternalSecureNoninvasiveDebugEnabled());
    else
        // If no EL3, IsSecure() returns the Effective value of (SCR_EL3.NS == '0')
        niden = (!IsSecure() || ExternalSecureNoninvasiveDebugEnabled());
    return (EDSCR.TFO == '0' || !niden);
// TraceAllowed()
// ==============
// Returns TRUE if Self-hosted Trace is allowed in the current Security state and Exception Level
boolean TraceAllowed()
    if !HaveTraceExt() then return FALSE;
    if SelfHostedTraceEnabled() then
        if IsSecure() && HaveEL(EL3) then
            secure_trace_enable = (if ELUsingAArch32(EL3) then SDCR.STE else MDCR_EL3.STE);
            if secure_trace_enable == '0' then return FALSE;
        TGE_bit = if EL2Enabled() then HCR_EL2.TGE else '0';
        // Select the trace-enable control for the current Exception level.
        case PSTATE.EL of
            when EL3  TRE_bit = if HighestELUsingAArch32() then TRFCR.E1TRE else '0';
            when EL2  TRE_bit = TRFCR_EL2.E2TRE;
            when EL1  TRE_bit = TRFCR_EL1.E1TRE;
            when EL0  TRE_bit = if TGE_bit == '1' then TRFCR_EL2.E0HTRE else TRFCR_EL1.E0TRE;
        return TRE_bit == '1';
    else
        return (!IsSecure() || ExternalSecureNoninvasiveDebugEnabled());
// TraceContextIDR2()
// ==================
// Returns TRUE if tracing of CONTEXTIDR_EL2 is allowed.
boolean TraceContextIDR2()
    if !TraceAllowed() || !HaveEL(EL2) then return FALSE;
    return (!SelfHostedTraceEnabled() || TRFCR_EL2.CX == '1');
// UnsignedSatQ()
// ==============
(bits(N), boolean)// Memory barrier instruction that preserves the relative order of memory accesses to System
// registers due to trace operations and other memory accesses to the same registers UnsignedSatQ(integer i, integer N)
if i > 2^N - 1 then
result = 2^N - 1; saturated = TRUE;
elsif i < 0 then
result = 0; saturated = TRUE;
else
result = i; saturated = FALSE;
return (result<N-1:0>, saturated);TraceSynchronizationBarrier();
// TraceTimeStamp()
// ================
// Returns the timestamp type to be used for trace, as selected by the
// TRFCR_EL2.TS and TRFCR_EL1.TS controls when Self-hosted Trace is enabled.
TimeStamp TraceTimeStamp()
    if SelfHostedTraceEnabled() then
        if HaveEL(EL2) then
            TS_el2 = TRFCR_EL2.TS;
            if TS_el2 == '10' then
                // Reserved value
                (-, TS_el2) = ConstrainUnpredictableBits(Unpredictable_EL2TIMESTAMP);
            case TS_el2 of
                when '00'  // Falls through to check TRFCR_EL1.TS
                when '01'
                    return TimeStamp_Virtual;
                when '10'
                    assert HaveECVExt();
                    return TimeStamp_OffsetPhysical;
                when '11'
                    return TimeStamp_Physical;
                otherwise
                    Unreachable();  // ConstrainUnpredictableBits removes this case
        TS_el1 = TRFCR_EL1.TS;
        if TS_el1 == 'x0' then
            // Reserved value
            (-, TS_el1) = ConstrainUnpredictableBits(Unpredictable_EL1TIMESTAMP);
        case TS_el1 of
            when '01'
                return TimeStamp_Virtual;
            when '10'
                assert HaveECVExt();
                return TimeStamp_OffsetPhysical;
            when '11'
                return TimeStamp_Physical;
            otherwise
                Unreachable();  // ConstrainUnpredictableBits removes this case
    else
        return TimeStamp_CoreSight;
// CombineS1S2AttrHints()
// ======================
// Combines cacheability attributes and allocation hints from stage 1 and stage 2
MemAttrHints CombineS1S2AttrHints(MemAttrHints s1desc, MemAttrHints s2desc, AccType s2acctype)
    MemAttrHints result;

    apply_force_writeback = HaveStage2MemAttrControl() && HCR_EL2.FWB == '1';

    if apply_force_writeback then
        // HCR_EL2.FWB == '1': stage 2 attributes override or force the combination.
        if S2CacheDisabled(s2acctype) then
            result.attrs = MemAttr_NC;       // force Non-cacheable
        elsif s2desc.attrs == '11' then
            result.attrs = s1desc.attrs;
        elsif s2desc.attrs == '10' then
            result.attrs = MemAttr_WB;       // force Write-back
        else
            result.attrs = MemAttr_NC;
    else
        if s2desc.attrs == '01' || s1desc.attrs == '01' then
            result.attrs = bits(2) UNKNOWN;  // Reserved
        elsif s2desc.attrs == MemAttr_NC || s1desc.attrs == MemAttr_NC then
            result.attrs = MemAttr_NC;       // Non-cacheable
        elsif s2desc.attrs == MemAttr_WT || s1desc.attrs == MemAttr_WT then
            result.attrs = MemAttr_WT;       // Write-through
        else
            result.attrs = MemAttr_WB;       // Write-back

    if result.attrs == MemAttr_NC then
        result.hints = MemHint_No;
    elsif apply_force_writeback then
        if s1desc.attrs != MemAttr_NC then
            result.hints = s1desc.hints;
        else
            result.hints = MemHint_RWA;
    else
        result.hints = s1desc.hints;

    result.transient = s1desc.transient;
    return result;
// CombineS1S2Device()
// ===================
// Combines device types from stage 1 and stage 2
// The result is the more restrictive of the two device types.
DeviceType CombineS1S2Device(DeviceType s1device, DeviceType s2device)
    if s2device == DeviceType_nGnRnE || s1device == DeviceType_nGnRnE then
        result = DeviceType_nGnRnE;
    elsif s2device == DeviceType_nGnRE || s1device == DeviceType_nGnRE then
        result = DeviceType_nGnRE;
    elsif s2device == DeviceType_nGRE || s1device == DeviceType_nGRE then
        result = DeviceType_nGRE;
    else
        result = DeviceType_GRE;
    return result;
// LongConvertAttrsHints()
// =======================
// Convert the long attribute fields for Normal memory as used in the MAIR fields
// to orthogonal attributes and hints
MemAttrHints LongConvertAttrsHints(bits(4) attrfield, AccType acctype)
    assert !IsZero(attrfield);
    MemAttrHints result;
    if S1CacheDisabled(acctype) then            // Force Non-cacheable
        result.attrs = MemAttr_NC;
        result.hints = MemHint_No;
    else
        if attrfield<3:2> == '00' then          // Write-through transient
            result.attrs = MemAttr_WT;
            result.hints = attrfield<1:0>;
            result.transient = TRUE;
        elsif attrfield<3:0> == '0100' then     // Non-cacheable (no allocate)
            result.attrs = MemAttr_NC;
            result.hints = MemHint_No;
            result.transient = FALSE;
        elsif attrfield<3:2> == '01' then       // Write-back transient
            result.attrs = MemAttr_WB;
            result.hints = attrfield<1:0>;
            result.transient = TRUE;
        else                                    // Write-through/Write-back non-transient
            result.attrs = attrfield<3:2>;
            result.hints = attrfield<1:0>;
            result.transient = FALSE;
    return result;
// MemAttrDefaults()
// =================
// Supply default values for memory attributes, including overriding the shareability attributes
// for Device and Non-cacheable memory types.
MemoryAttributes MemAttrDefaults(MemoryAttributes memattrs)
    if memattrs.memtype == MemType_Device then
        // Cacheability hints are meaningless for Device memory.
        memattrs.inner = MemAttrHints UNKNOWN;
        memattrs.outer = MemAttrHints UNKNOWN;
        memattrs.shareable = TRUE;
        memattrs.outershareable = TRUE;
    else
        memattrs.device = DeviceType UNKNOWN;
        // Non-cacheable Normal memory is always treated as Outer Shareable.
        if memattrs.inner.attrs == MemAttr_NC && memattrs.outer.attrs == MemAttr_NC then
            memattrs.shareable = TRUE;
            memattrs.outershareable = TRUE;
    return memattrs;
// S1CacheDisabled()
// =================
// Returns TRUE if stage 1 cacheability is disabled for this access type
// in the current translation regime.
boolean S1CacheDisabled(AccType acctype)
    if ELUsingAArch32(S1TranslationRegime()) then
        if PSTATE.EL == EL2 then
            enable = if acctype == AccType_IFETCH then HSCTLR.I else HSCTLR.C;
        else
            enable = if acctype == AccType_IFETCH then SCTLR.I else SCTLR.C;
    else
        enable = if acctype == AccType_IFETCH then SCTLR[].I else SCTLR[].C;
    return enable == '0';
// S2AttrDecode()
// ==============
// Converts the Stage 2 attribute fields into orthogonal attributes and hints
MemoryAttributes S2AttrDecode(bits(2) SH, bits(4) attr, AccType acctype)
    MemoryAttributes memattrs;

    apply_force_writeback = HaveStage2MemAttrControl() && HCR_EL2.FWB == '1';

    // Device memory
    if (apply_force_writeback && attr<2> == '0') || attr<3:2> == '00' then
        memattrs.memtype = MemType_Device;
        case attr<1:0> of
            when '00' memattrs.device = DeviceType_nGnRnE;
            when '01' memattrs.device = DeviceType_nGnRE;
            when '10' memattrs.device = DeviceType_nGRE;
            when '11' memattrs.device = DeviceType_GRE;

    // Normal memory
    elsif apply_force_writeback then
        if attr<2> == '1' then
            memattrs.memtype = MemType_Normal;
            memattrs.inner.attrs = attr<1:0>;
            memattrs.outer.attrs = attr<1:0>;
            memattrs.shareable = SH<1> == '1';
            memattrs.outershareable = SH == '10';
    elsif attr<1:0> != '00' then
        memattrs.memtype = MemType_Normal;
        memattrs.outer = S2ConvertAttrsHints(attr<3:2>, acctype);
        memattrs.inner = S2ConvertAttrsHints(attr<1:0>, acctype);
        memattrs.shareable = SH<1> == '1';
        memattrs.outershareable = SH == '10';
    else
        memattrs = MemoryAttributes UNKNOWN;    // Reserved

    return MemAttrDefaults(memattrs);
// S2CacheDisabled()
// =================
// Returns TRUE if stage 2 cacheability is disabled for this access type.
boolean S2CacheDisabled(AccType acctype)
    if ELUsingAArch32(EL2) then
        disable = if acctype == AccType_IFETCH then HCR2.ID else HCR2.CD;
    else
        disable = if acctype == AccType_IFETCH then HCR_EL2.ID else HCR_EL2.CD;
    return disable == '1';
// S2ConvertAttrsHints()
// =====================
// Converts the attribute fields for Normal memory as used in stage 2
// descriptors to orthogonal attributes and hints
MemAttrHints S2ConvertAttrsHints(bits(2) attr, AccType acctype)
    assert attr != '00';                    // '00' is the Device encoding, handled by the caller

    MemAttrHints result;

    if S2CacheDisabled(acctype) then        // Force Non-cacheable
        result.attrs = MemAttr_NC;
        result.hints = MemHint_No;
    else
        case attr of
            when '01'                       // Non-cacheable (no allocate)
                result.attrs = MemAttr_NC;
                result.hints = MemHint_No;
            when '10'                       // Write-through
                result.attrs = MemAttr_WT;
                result.hints = MemHint_RWA;
            when '11'                       // Write-back
                result.attrs = MemAttr_WB;
                result.hints = MemHint_RWA;

    result.transient = FALSE;
    return result;
// ShortConvertAttrsHints()
// ========================
// Converts the short attribute fields for Normal memory as used in the TTBR and
// TEX fields to orthogonal attributes and hints
MemAttrHints ShortConvertAttrsHints(bits(2) RGN, AccType acctype, boolean secondstage)
    MemAttrHints result;
    if (!secondstage && S1CacheDisabled(acctype)) || (secondstage && S2CacheDisabled(acctype)) then
        // Force Non-cacheable
        result.attrs = MemAttr_NC;
        result.hints = MemHint_No;
    else
        case RGN of
            when '00'                       // Non-cacheable (no allocate)
                result.attrs = MemAttr_NC;
                result.hints = MemHint_No;
            when '01'                       // Write-back, Read and Write allocate
                result.attrs = MemAttr_WB;
                result.hints = MemHint_RWA;
            when '10'                       // Write-through, Read allocate
                result.attrs = MemAttr_WT;
                result.hints = MemHint_RA;
            when '11'                       // Write-back, Read allocate
                result.attrs = MemAttr_WB;
                result.hints = MemHint_RA;
    result.transient = FALSE;
    return result;
// WalkAttrDecode()
// ================
// Decodes the shareability and inner/outer cacheability fields used for
// translation table walk accesses into a MemoryAttributes value.
MemoryAttributes WalkAttrDecode(bits(2) SH, bits(2) ORGN, bits(2) IRGN, boolean secondstage)
    MemoryAttributes memattrs;

    AccType acctype = AccType_NORMAL;

    memattrs.memtype = MemType_Normal;
    memattrs.inner = ShortConvertAttrsHints(IRGN, acctype, secondstage);
    memattrs.outer = ShortConvertAttrsHints(ORGN, acctype, secondstage);
    memattrs.shareable = SH<1> == '1';
    memattrs.outershareable = SH == '10';
    memattrs.tagged = FALSE;

    return MemAttrDefaults(memattrs);
// HasS2Translation()
// ==================
// Returns TRUE if stage 2 translation is present for the current translation regime
boolean HasS2Translation()
    return (EL2Enabled() && !IsInHost() && PSTATE.EL IN {EL0,EL1});
// Have16bitVMID()
// ===============
// Returns TRUE if EL2 and support for a 16-bit VMID are implemented.
boolean Have16bitVMID()
    return HaveEL(EL2) && boolean IMPLEMENTATION_DEFINED "Has 16-bit VMID";
// PAMax()
// =======
// Returns the IMPLEMENTATION DEFINED upper limit on the physical address
// size for this processor, as log2().
integer PAMax()
    return integer IMPLEMENTATION_DEFINED "Maximum Physical Address Size";
// S1TranslationRegime()
// =====================
// Stage 1 translation regime for the given Exception level
bits(2) S1TranslationRegime(bits(2) el)
    if el != EL0 then
        return el;
    elsif HaveEL(EL3) && ELUsingAArch32(EL3) && SCR.NS == '0' then
        return EL3;
    elsif HaveVirtHostExt() && ELIsInHost(el) then
        return EL2;
    else
        return EL1;
// S1TranslationRegime()
// =====================
// Returns the Exception level controlling the current Stage 1 translation regime. For the most
// part this is unused in code because the system register accessors (SCTLR[], etc.) implicitly
// return the correct value.
bits(2) S1TranslationRegime()
return S1TranslationRegimeMemAttrDefaultsHaveVirtHostExt(memattrs);(PSTATE.EL);
// VAMax()
// =======
// Returns the IMPLEMENTATION DEFINED upper limit on the virtual address
// size for this processor, as log2().
integer VAMax()
    return integer IMPLEMENTATION_DEFINED "Maximum Virtual Address Size";
// Have16bitVMID()
// ===============
// Returns TRUE if EL2 and support for a 16-bit VMID are implemented.
boolean Have16bitVMID()
    return HaveEL(EL2) && boolean IMPLEMENTATION_DEFINED "Has 16-bit VMID";
// PAMax()
// =======
// Returns the IMPLEMENTATION DEFINED upper limit on the physical address
// size for this processor, as log2().
integer PAMax()
    return integer IMPLEMENTATION_DEFINED "Maximum Physical Address Size";
// S1TranslationRegime()
// =====================
// Stage 1 translation regime for the given Exception level
bits(2) S1TranslationRegime(bits(2) el)
    if el != EL0 then
        return el;
    elsif HaveEL(EL3) && ELUsingAArch32(EL3) && SCR.NS == '0' then
        return EL3;
    elsif HaveVirtHostExt() && ELIsInHost(el) then
        return EL2;
    else
        return EL1;

// S1TranslationRegime()
// =====================
// Returns the Exception level controlling the current Stage 1 translation regime. For the most
// part this is unused in code because the system register accessors (SCTLR[], etc.) implicitly
// return the correct value.
bits(2) S1TranslationRegime()
    return S1TranslationRegime(PSTATE.EL);
// VAMax()
// =======
// Returns the IMPLEMENTATION DEFINED upper limit on the virtual address
// size for this processor, as log2().
integer VAMax()
    return integer IMPLEMENTATION_DEFINED "Maximum Virtual Address Size";
Internal version only: isa v01_24 (diffed against v01_19), pseudocode v2020-12 (diffed against v2020-09_xml), sve v2020-12-3-g87778bb (diffed against v2020-09_rc3)
; Build timestamps: 2020-12-17T15:20 / 2020-09-30T21:20:35
Copyright © 2010-2020 Arm Limited or its affiliates. All rights reserved. This document is Non-Confidential.
| (old) | htmldiff from- | (new) |